content
stringlengths 5
1.05M
|
|---|
# Demo: class attributes are shared by all instances.
class A:
    # Class attribute: a single value stored on the class object itself.
    test = 123

a1 = A()
a2 = A()
# Neither instance defines its own 'test', so attribute lookup falls
# back to the class attribute A.test for both.
print(a1.test)
print(a2.test)
# Rebinding the CLASS attribute is visible through every instance that
# has not shadowed it with an instance attribute of the same name.
A.test = 321
print(a1.test)
print(a2.test)
|
import os
from keras.applications.inception_v3 import InceptionV3
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.metrics import categorical_accuracy
from keras import backend as K
import keras.optimizers
from metrics import *
def create_inceptionv3(n_classes):
    """Build an ImageNet-pretrained InceptionV3 with a fresh classifier head.

    Returns (model, output_node_name, image_size, n_new_layers).
    """
    # InceptionV3 expects 299x299 RGB inputs.
    image_size = 299
    backbone = InceptionV3(weights='imagenet', include_top=False)
    model, output_node_name, n_new_layers = add_fc_layer_and_output_layer(
        backbone, n_classes)
    return model, output_node_name, image_size, n_new_layers
def create_mobilenetv2(n_classes):
    """Build an ImageNet-pretrained MobileNetV2 with a fresh classifier head.

    Returns (model, output_node_name, image_size, n_new_layers).
    """
    image_size = 224
    # alpha=1.4 has the best results from what I read, but I get OOM errors
    # for alpha=1.4.
    backbone = MobileNetV2(
        input_shape=(image_size, image_size, 3),
        alpha=1.3,
        weights='imagenet',
        include_top=False,
    )
    model, output_node_name, n_new_layers = add_fc_layer_and_output_layer(
        backbone, n_classes)
    return model, output_node_name, image_size, n_new_layers
def load_model(path, n_classes):
    """Load a saved Keras model and graft a new softmax head for n_classes.

    NOTE(review): image_size is hard-coded to 224, i.e. the saved model is
    assumed to be MobileNetV2-based -- confirm for other backbones.
    """
    base_model = keras.models.load_model(path)
    print(base_model.layers[-1])
    print(base_model.output)
    #base_model.summary()
    # Drop the old classifier head.  Popping only mutates the layer list;
    # the Model() call below rebuilds the graph from the output of the new
    # last layer, which is what actually detaches the old head.
    base_model.layers.pop()
    base_model.layers.pop()
    #base_model.summary()
    # Rename the last two layers otherwise we will get a name clash when add a new dense layer below.
    #base_model.layers[-2].name = base_model.layers[-2].name + '_original'
    #base_model.layers[-1].name = base_model.layers[-1].name + '_original'
    predictions = add_output_layer(base_model.layers[-1].output, n_classes)
    # this is the model we will train
    new_model = Model(inputs=base_model.input, outputs=predictions)
    #new_model.summary()
    image_size = 224 # Assume that its a mobilenetv2 model.
    # NOTE(review): add_output_layer appends TWO Dense layers, so
    # n_new_layers = 1 may undercount for freeze_layers -- confirm intent.
    n_new_layers = 1
    return new_model, 'dense_1/Softmax', image_size, n_new_layers
def add_fc_layer_and_output_layer(base_model, n_classes):
    """Append global pooling plus a classifier head to base_model.

    Returns (model, output_node_name, n_new_layers).
    """
    # Collapse the spatial dimensions, then attach the classifier head.
    pooled = GlobalAveragePooling2D()(base_model.output)
    predictions = add_output_layer(pooled, n_classes)
    # The head contributed two freshly-initialized layers.
    n_new_layers = 2
    trainable_model = Model(inputs=base_model.input, outputs=predictions)
    return trainable_model, 'dense_2/Softmax', n_new_layers
def add_output_layer(input, n_classes):
    """Attach a small fully-connected head ending in a softmax over n_classes."""
    # Intermediate fully-connected layer.
    hidden = Dense(n_classes, activation='relu')(input)
    # Final logistic layer producing the class probabilities.
    return Dense(n_classes, activation='softmax')(hidden)
def freeze_layers(model, n_layers):
    """Freeze the first n_layers of model and unfreeze the rest.

    Used for fine-tuning: the bottom (generic) layers keep their pretrained
    weights while the task-specific top layers remain trainable.
    """
    # To decide how many layers to freeze, list them first, e.g.:
    #for i, layer in enumerate(base_model.layers):
    #    print(i, layer.name)
    print("Freezing %s out of %s layers" % (n_layers, len(model.layers)))
    # Layers before the cut-off are frozen; everything after stays trainable.
    for index, layer in enumerate(model.layers):
        layer.trainable = index >= n_layers
def compile(model, n_classes):
    """Compile model for multi-class training (SGD + categorical cross-entropy).

    NOTE(review): shadows the builtin compile(); n_classes is currently unused
    (it fed the commented-out weighted-accuracy metric below).
    """
    #def w_acc(y_true, y_pred):
    #    return weighted_accuracy(n_classes, y_true, y_pred)
    model.compile(
        #optimizer=keras.optimizers.RMSprop(lr=0.00001),
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.85, decay=0.001),
        #optimizer=keras.optimizers.Adam(lr=0.0001),
        #optimizer=keras.optimizers.Adagrad(lr=0.001, decay=0.001),
        #optimizer=keras.optimizers.Adadelta(lr=0.01),
        loss='categorical_crossentropy',
        metrics=[categorical_accuracy])
def save_model(model, output_dir, model_name, output_node_name, classes):
    """Export `model` in several formats under output_dir:

    - <model_name>.pb   : frozen TF graph (variables folded into constants)
    - <model_name>.h5py : Keras model without optimizer state
    - labels.txt        : class labels (via save_labels)
    - <model_name>.ckpt : TF checkpoint of the live session

    output_node_name is currently unused; kept for interface compatibility.
    """
    # `tf` is used throughout but never imported at the top of this file;
    # import it locally so the function is self-contained.
    import tensorflow as tf
    sess = K.get_session()
    # Fold variable values into constants so the .pb file is self-contained.
    frozen_graph = freeze_session(
        sess, output_names=[out.op.name for out in model.outputs])
    # exist_ok lets the same output_dir be reused across export runs
    # (the original crashed if the directory already existed).
    os.makedirs(output_dir, exist_ok=True)
    pb_path = os.path.join(output_dir, '%s.pb' % model_name)
    with tf.gfile.GFile(pb_path, 'wb') as f:
        f.write(frozen_graph.SerializeToString())
    print("Saved to", pb_path)
    hdf5_path = os.path.join(output_dir, '%s.h5py' % model_name)
    keras.models.save_model(model, hdf5_path, include_optimizer=False)
    print("Saved to", hdf5_path)
    save_labels(classes, os.path.join(output_dir, "labels.txt"))
    # A single Saver writes the checkpoint (the original constructed an
    # extra, unused Saver at the top of the function).
    saver = tf.train.Saver()
    saver_path = saver.save(sess, os.path.join(output_dir, "%s.ckpt" % model_name))
    print("Saved to", saver_path)
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.
    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.
    @param session The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
    or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    """
    graph = session.graph
    with graph.as_default():
        # Freeze every variable except the ones the caller wants kept.
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        # Also list variables as outputs so pruning does not drop them
        # before they are converted to constants.
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            # Strip device pinning so the frozen graph loads anywhere.
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph
|
import cv2
import numpy as np
def read_image(image_path: str) -> np.ndarray:
    """Decode the image file at image_path into a numpy array.

    Reads the raw bytes and decodes them with cv2.imdecode, which (unlike
    cv2.imread) also handles paths cv2 cannot open directly.  Returns the
    decoded image with cv2.IMREAD_UNCHANGED (alpha/bit-depth preserved);
    cv2.imdecode returns None on undecodable data rather than raising.
    """
    # Context manager guarantees the file handle is closed (the original
    # never closed it); 'data' avoids shadowing the builtin 'bytes'.
    with open(image_path, "rb") as stream:
        data = bytearray(stream.read())
    array = np.asarray(data, dtype=np.uint8)
    return cv2.imdecode(array, cv2.IMREAD_UNCHANGED)
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/4/13 下午6:07
@File : jqtestbase.py
@author : pchaos
@license : Copyright(C), pchaos
@Contact : p19992003#gmail.com
"""
import unittest
import datetime
import os
from jqdatasdk import *
from dotenv import load_dotenv
from .testbase import TestingBase
def getEnvVar(key):
    """Return environment variable `key`, loading a .env file first.

    Also ensures the project root (two levels above this file) is on
    sys.path so sibling packages stay importable.
    """
    # The original did `from os import sys, path`, which only works because
    # the os module happens to re-export sys -- import sys directly instead.
    import sys
    from os import path
    # __file__ should be defined in this case
    DIRNAME = path.dirname(path.dirname(path.abspath(__file__)))
    if DIRNAME not in sys.path:
        sys.path.append(DIRNAME)
    # Populate os.environ from a .env file (python-dotenv).
    load_dotenv(verbose=True)
    return os.getenv(key)
class jqTestingbase(TestingBase):
    """Testing base class that authenticates against jqdatasdk."""

    @classmethod
    def userInit(cls):
        """User initialisation: authenticate with credentials from .env."""
        # The corresponding parameters must be written into the .env file.
        userid = getEnvVar('jquserid')
        passwd = getEnvVar('jqpasswd')
        assert userid
        auth(userid, passwd)

    @classmethod
    def userEnd(cls):
        """Class teardown hook: release user resources (currently a no-op)."""
        pass
|
# -*- coding: utf-8 -*-
# Copyright 2019 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import hmac
from aiohttp import web
import aiohttp_jinja2
import aiohttp_session_flash as flash
import markdown
from deep_dashboard import config
from deep_dashboard import deep_oc
from deep_dashboard import log
CONF = config.CONF
LOG = log.LOG
routes = web.RouteTableDef()
@routes.get('/modules', name="modules")
@aiohttp_jinja2.template('modules/index.html')
async def index(request):
    """Render the module overview page from the cached module templates."""
    context = request.context
    context["breadcrumbs"] = [
        ("Home", False, "/"),
        ("Modules", True, "/modules"),  # FIXME(aloga): use url
    ]
    context["templates"] = await request.app.cache.modules.get_all()
    return context
@routes.post("/reload", name="reload")
async def reload_all_modules(request):
    """Load TOSCA templates and map them to modules
    This function is used to refresh the TOSCA templates and the mapping
    between modules and TOSCA templates. A webhook is set up so that when any
    of the repos [1][2] is updated, Github will POST to this method to refresh
    the Dashboard. The webhook's secret has to be the same has GITHUB_SECRET in
    the conf so that we can validate that the payload comes indeed from Github
    and the webhook has to be configured to deliver an 'application/json'.
    [1] https://github.com/deephdc/deep-oc
    [2] https://github.com/indigo-dc/tosca-templates/tree/master/deep-oc
    [3] https://gist.github.com/categulario/deeb41c402c800d1f6e6
    """
    # Check request comes indeed from Github
    if CONF.github_secret:
        if 'X-Hub-Signature' not in request.headers:
            return web.Response(
                text='Refresh petitions must be signed from Github.',
                status=403
            )
        # Fix for the old FIXME: hmac.new needs bytes, and aiohttp exposes
        # the raw body through `await request.read()` (the original used the
        # Flask-style `request.data`, which does not exist in aiohttp).
        secret = CONF.github_secret
        if isinstance(secret, str):
            secret = secret.encode()
        body = await request.read()
        signature = hmac.new(secret, body, hashlib.sha1).hexdigest()
        if not hmac.compare_digest(
            signature,
            request.headers['X-Hub-Signature'].split('=')[1]
        ):
            return web.Response(
                text='Failed to verify the signature!',
                status=403
            )
    LOG.info('Reloading modules and TOSCA templates ...')
    await deep_oc.load_deep_oc_as_task(request.app)
    return web.Response(status=201)
@routes.get("/modules/{module}/train", name="module.train")
@aiohttp_jinja2.template('modules/train.html')
async def configure_module_training(request):
    """Render the training-configuration page for one module.

    Builds the general configuration (TOSCA template, docker tag, hardware,
    run command) from the query string, adjusts the TOSCA input defaults
    accordingly, and groups the inputs into sections for the template.
    """
    module = request.match_info["module"].lower()
    # Unknown module: flash an error and bounce back to the overview.
    if not await request.app.cache.modules.exists(module):
        flash.flash(
            request,
            ("danger", f"Module does not exist: {module}.")
        )
        return web.HTTPFound("/modules")
    request.context["selected_module"] = module
    module_meta = await request.app.cache.modules.get(module)
    # Query-string selections, falling back to the first template / first
    # docker tag / CPU / DEEPaaS.
    selected_tosca = request.query.get(
        "selected_tosca",
        list(module_meta["tosca_templates"].keys())[0]
    )
    template_name = module_meta["tosca_templates"][selected_tosca]
    hardware_configuration = request.query.get("hardware_configuration",
                                               "CPU").lower()
    docker_tag = request.query.get("docker_tag",
                                   module_meta["docker_tags"][0]).lower()
    run_command = request.query.get("run_command",
                                    "DEEPaaS")
    general_configuration = {
        "tosca_templates": {
            "available": module_meta["tosca_templates"].keys(),
            "selected": selected_tosca,
        },
        "docker_tags": {
            "available": module_meta["docker_tags"],
            "selected": docker_tag,
        },
        "hardware_configuration": {
            "available": ["CPU", "GPU"],
            "selected": hardware_configuration,
        },
        "run_command": {
            "available": ["DEEPaaS", "JupyterLab", "Custom command"],
            "selected": run_command,
        },
    }
    tosca_template = module_meta["tosca_templates"].get(selected_tosca)
    if tosca_template is None:
        flash.flash(
            request,
            ("danger", f"TOSCA template does not exist: {tosca_template}.")
        )
        return web.HTTPFound("/modules")
    aux = await request.app.cache.tosca_templates.get(tosca_template)
    # Deep-copy so the per-request default tweaks below never leak into
    # the shared template cache.
    inputs = copy.deepcopy(
        aux["inputs"]
    )
    inputs['docker_image'].setdefault(
        'default',
        module_meta['sources']['docker_registry_repo'])
    docker_tags = module_meta['docker_tags']
    if docker_tag not in docker_tags:
        docker_tag = docker_tags[0]
    # NOTE(review): run_command defaults to "DEEPaaS" (mixed case) while the
    # comparisons below are lower-case -- only explicitly lower-cased query
    # values will match these branches; confirm that is intended.
    if run_command == 'deepaas':
        inputs['run_command']['default'] = 'deepaas-run --listen-ip=0.0.0.0'
        if hardware_configuration == 'gpu':
            inputs['run_command']['default'] += ' --listen-port=$PORT0'
    elif run_command == 'jupyterlab':
        flash.flash(
            request,
            ("warning", 'Remember to set a Jupyter password.')
        )
        inputs['run_command']['default'] = (
            '/srv/.jupyter/run_jupyter.sh --allow-root'
        )
        if hardware_configuration == 'gpu':
            inputs['run_command']['default'] = (
                "jupyterPORT=$PORT2 " + inputs['run_command']['default']
            )
    # Hardware defaults: CPU uses a fixed monitor port, GPU a dynamic one.
    if hardware_configuration == 'cpu':
        inputs['num_cpus']['default'] = 1
        inputs['num_gpus']['default'] = 0
        inputs['run_command']['default'] = (
            "monitorPORT=6006 " + inputs['run_command']['default']
        )
    elif hardware_configuration == 'gpu':
        inputs['num_cpus']['default'] = 1
        inputs['num_gpus']['default'] = 1
        inputs['run_command']['default'] = (
            "monitorPORT=$PORT1 " + inputs['run_command']['default']
        )
    # FIXME(aloga): improve conditions here
    if run_command == "custom command":
        inputs['run_command']['default'] = ''
    inputs['docker_image']['default'] += ':{}'.format(docker_tag)
    # Group the inputs by topic for the template's sections.
    grouped = {
        "docker": {},
        "jupyter": {},
        "storage": {},
        "hardware": {},
        "other": {},
    }
    for k, v in inputs.items():
        if k.startswith("docker_"):
            grouped["docker"][k] = v
        elif k.startswith("jupyter_"):
            grouped["jupyter"][k] = v
        elif any([k.startswith("rclone_"),
                  k.startswith("onedata_"),
                  k.startswith("oneclient_"),
                  k == "app_in_out_base_dir"]):
            grouped["storage"][k] = v
        elif k in ["mem_size", "num_cpus", "num_gpus"]:
            grouped["hardware"][k] = v
        else:
            grouped["other"][k] = v
    template_meta = {
        "inputs": inputs,
        "grouped": grouped,
    }
    request.context["general_configuration"] = general_configuration
    request.context["template_meta"] = template_meta
    request.context["template_name"] = template_name
    request.context["slas"] = request.app.slas
    request.context["module_meta"] = module_meta
    request.context["breadcrumbs"] = [
        ("Home", False, "/"),
        ("Modules", False, "/modules"),  # FIXME(aloga): use url
        (module, False, f"/modules/{module}"),  # FIXME(aloga): use url
        ("train", True, f"/modules/{module}/train"),  # FIXME(aloga): use url
    ]
    return request.context
@routes.get("/modules/{module}", name="module")
@aiohttp_jinja2.template('modules/module.html')
async def module_info(request):
    """Render the detail page for a single module."""
    module = request.match_info["module"].lower()
    if not await request.app.cache.modules.exists(module):
        flash.flash(
            request,
            ("danger", f"Module does not exist: {module}.")
        )
        return web.HTTPFound("/modules")
    module_meta = await request.app.cache.modules.get(module)
    request.context["modulename"] = module
    request.context["module_meta"] = copy.deepcopy(module_meta)
    # Join the description lines and render them as Markdown/HTML.
    description_lines = module_meta.get("description")
    if description_lines:
        rendered = markdown.markdown("\n".join(description_lines))
    else:
        rendered = markdown.markdown("No description provided.")
    request.context["module_meta"]["description"] = rendered
    request.context["breadcrumbs"] = [
        ("Home", False, "/"),
        ("Modules", False, "/modules"),  # FIXME(aloga): use url
        (module, True, f"/modules/{module}"),  # FIXME(aloga): use url
    ]
    return request.context
|
#-*- coding: utf-8 -*-
# Creation Date : 2016-10-21
# Created by : Antoine LeBel
import observer
class MailingService(observer.Observer):
    """Observer that sends an e-mail warning when humidity drops too low."""

    # Humidity threshold below which a warning is emitted.
    MIN_HUMIDITY = 20

    def __init__(self):
        self.sensor = None

    def update(self, HumiditySensor):
        """Observer callback: remember the sensor and warn if it is too dry."""
        self.sensor = HumiditySensor
        if HumiditySensor.humidity >= self.MIN_HUMIDITY:
            return
        self._send_humidity_warning()

    def _send_humidity_warning(self):
        # French: "An e-mail was sent to warn that the plants are dry!"
        print("Un message courriel a été envoyé pour avertir que les plantes sont sèches!")
|
import config_tb
from config_db import config
import requests
from datetime import datetime
from bs4 import BeautifulSoup
import telebot
from db import UseDataBase
from emoji import *
# заглавная страница сервиса Яндекс.Погода с прогнозом
# по текущему месту положения
URL = 'https://yandex.ru/pogoda/'
# список регионов России
URL_REGIONS = 'https://yandex.ru/pogoda/region/225?via=reg'
# ссылка на конкретный регион
URL_REGION = None
class Var():
    """Per-chat conversational state for the region-selection dialog."""

    def __init__(self):
        # First letter chosen from the region name.
        self.btn = None
        # First letter chosen from the region's sub-area name.
        self.btn_sub_reg = None
        # Cached list of regions (or their sub-areas).
        self.regions = None
# Per-chat dialog state, keyed by Telegram chat id.
users_property = {}
# Bot instance authenticated with the token from config_tb.
bot = telebot.TeleBot(config_tb.TOKEN)
@bot.message_handler(commands=['start'])
def welcome(message):
    """Handle /start: register the chat and send the greeting.

    Creates fresh per-chat state and inserts a row with the default URLs
    (skipped via ON CONFLICT if the chat is already registered).
    """
    users_property[message.chat.id] = Var()
    with UseDataBase(config) as cursor:
        # NOTE(review): SQL is built by f-string interpolation.  chat_id is a
        # Telegram integer and the URLs are module constants, so the risk is
        # low, but a parameterized query would still be safer -- confirm the
        # placeholder style of UseDataBase's driver before changing.
        query = f"""
        INSERT INTO users_property
        (
            chat_id,
            url,
            url_region
        )
        VALUES (
            {message.chat.id},
            '{URL}',
            '{URL_REGION}'
        )
        ON CONFLICT(chat_id) DO NOTHING;
        """
        cursor.execute(query)
    bot.send_message(
        message.chat.id,
        (
            'Привет! Я помогу тебе узнать прогноз погоды.\n'
            'Чтобы посмотреть данные о погоде на текущий момент '
            '/weather_now.\n'
            'Посмотреть подробный прогноз на сегодня '
            '/weather_today.\n'
            'Посмотреть прогноз погоды на 10 дней /10_day_forecast.\n'
            'Выбрать местоположение /select_city_or_area.\n'
            'Получить помощь /help.\n'
            f'Текущее местоположение: {start_area()}'
        )
    )
@bot.message_handler(commands=['help'])
def help(message):
    """Handle /help: send the command overview.

    NOTE(review): shadows the builtin help(); harmless here since the name
    is only used for handler registration.
    """
    bot.send_message(
        message.chat.id,
        (
            '1) Посмотреть погоду на текущий момент /weather_now.\n'
            '2) Посмотреть подробный прогноз на сегодня '
            '/weather_today.\n'
            '3) Посмотреть прогноз погоды на 10 дней /10_day_forecast.\n'
            '4) Нажми «Обновить», чтобы получить обновленную информацию о'
            ' погоде.\n'
            # Fix: the bot registers /select_city_or_area (see
            # location_selection), not /location_selection -- the old text
            # advertised a command that does not exist.
            '5) Для смены региона в прогнозе погоды /select_city_or_area.\n'
            '6) Бот поддерживает встроенный режим. Введи <yournameforthebot>'
            ' в любом чате и выбери команду для составления прогноза погоды.'
        ),
        # Optional: add a contact-the-developer button.
        ## reply_markup=button(
        ##     text='Связаться с разработчиком',
        ##
        ##
        ##     url='telegram.me/<yourrandomdeveloper>'
        ## )
    )
@bot.message_handler(commands=['weather_now'])
def current_weather(message):
    """Send the current weather for this chat's saved location."""
    chat_id = message.chat.id
    bot.send_chat_action(chat_id, 'typing')
    text = set_message(get_urls('url', chat_id))
    markup = button(
        text='Обновить',
        callback_data='update_current',
        switch_inline_query='Current'
    )
    bot.send_message(chat_id, text, parse_mode='html', reply_markup=markup)
@bot.message_handler(commands=['weather_today'])
def weather_today(message):
    """Send today's detailed forecast for this chat's saved location."""
    chat_id = message.chat.id
    bot.send_chat_action(chat_id, 'typing')
    text = set_today_message(get_urls('url', chat_id))
    markup = button(
        text='Обновить',
        callback_data='update_today',
        switch_inline_query='Today'
    )
    bot.send_message(chat_id, text, parse_mode='html', reply_markup=markup)
@bot.message_handler(commands=['10_day_forecast'])
def ten_day_weather(message):
    """Send the 10-day forecast for this chat's saved location."""
    chat_id = message.chat.id
    bot.send_chat_action(chat_id, 'typing')
    text = set_message_10_day(get_urls('url', chat_id))
    markup = button(
        text='Обновить',
        callback_data='update_10_day',
        switch_inline_query='10 day'
    )
    bot.send_message(chat_id, text, parse_mode='html', reply_markup=markup)
def start_area():
    """Return 'Country > Region > Area' as detected by Yandex for this host."""
    soup = scraping(URL)
    breadcrumbs = soup.find('ol', 'breadcrumbs__list')
    country, region, area = breadcrumbs.find_all('span', 'breadcrumbs__title')
    return f'{country.text} > {region.text} > {area.text}'
@bot.message_handler(commands=['select_city_or_area'])
def location_selection(message):
    """Start the location-selection dialog with a first-letter keyboard."""
    chat_id = message.chat.id
    # Reset any previous dialog state for this chat.
    users_property[chat_id] = Var()
    bot.send_chat_action(chat_id, 'typing')
    bot.send_message(
        chat_id,
        'Выберите первый символ из названия региона РФ',
        reply_markup=alphabet(URL_REGIONS, 'set_region')
    )
@bot.callback_query_handler(func=lambda call: call.data.startswith('update'))
def weather_callback(query):
    """Rebuild and edit a previously sent weather message in place.

    Handles both regular chat messages (query.message) and messages that
    were sent through inline mode (query.inline_message_id).  The original
    spelled out six near-identical branches; the callback payload now maps
    to a (message builder, share label) pair instead.
    """
    bot.answer_callback_query(query.id)
    # callback payload -> (message builder, share label for the inline button)
    builders = {
        'update_current': (set_message, 'Current'),
        'update_today': (set_today_message, 'Today'),
        'update_10_day': (set_message_10_day, '10 day'),
    }
    if query.message:
        chat_id = query.message.chat.id
        message_id = query.message.message_id
        bot.send_chat_action(chat_id, 'typing')
        if query.data in builders:
            build, share = builders[query.data]
            # True -> prepend the "(Обновлено)" marker to the message.
            bot.edit_message_text(
                build(get_urls('url', chat_id), True),
                chat_id,
                message_id,
                parse_mode='HTML'
            )
            bot.edit_message_reply_markup(
                chat_id,
                message_id,
                reply_markup=button(
                    text='Обновить',
                    callback_data=query.data,
                    switch_inline_query=share
                )
            )
    elif query.inline_message_id:
        user_id = query.from_user.id
        bot.send_chat_action(user_id, 'typing')
        if query.data in builders:
            build, share = builders[query.data]
            bot.edit_message_text(
                build(get_urls('url', user_id), True),
                inline_message_id=query.inline_message_id,
                parse_mode='HTML'
            )
            bot.edit_message_reply_markup(
                inline_message_id=query.inline_message_id,
                reply_markup=button(
                    text='Обновить',
                    callback_data=query.data,
                    switch_inline_query=share
                )
            )
@bot.callback_query_handler(func=lambda call: True)
def location_query(query):
    """Drive the multi-step location-selection dialog.

    The callback payload encodes the step:
      set_location_back -> back to the region first-letter keyboard
      set_region<L>     -> list regions starting with letter <L>
      set_sub_reg...    -> first-letter keyboard for the chosen region
      main_sub_reg<L>   -> list the region's places starting with <L>
      current|<name>    -> store the chosen place as the chat default
    Every branch sets `keyboard`, which is attached to the edited message
    at the very end.
    """
    # Make sure this chat has a state object (e.g. after a bot restart).
    if query.message.chat.id not in users_property:
        users_property[query.message.chat.id] = Var()
    user = users_property[query.message.chat.id]
    bot.answer_callback_query(query.id)
    try:
        if query.data == 'set_location_back':
            keyboard = alphabet(
                URL_REGIONS,
                'set_region'
            )
            bot.edit_message_text(
                'Выберите первый символ из названия региона РФ',
                query.message.chat.id,
                query.message.message_id
            )
        elif query.data.startswith('set_region'):
            # Last payload character is the chosen first letter.
            regions = set_region(
                query.data[-1],
                URL_REGIONS
            )
            keyboard = telebot.types.InlineKeyboardMarkup(2)
            lst = [
                telebot.types.InlineKeyboardButton(
                    regions[region][0],
                    callback_data=(
                        f'set_sub_reg{query.data[-1]}'
                        f'|{regions[region][1]}'
                    )
                )
                for region in range(len(regions))
            ]
            keyboard.add(*lst)
            keyboard.add(
                telebot.types.InlineKeyboardButton(
                    '<<Назад',
                    callback_data='set_location_back'
                )
            )
            bot.edit_message_text(
                'Выберите регион',
                query.message.chat.id,
                query.message.message_id
            )
        elif (query.data.startswith('set_sub_reg')
              or query.data == 'set_sub_reg_back'):
            if query.data != 'set_sub_reg_back':
                # Payload shape: 'set_sub_reg<letter>|<region url>'.
                btn, value = query.data.split('|')
                set_urls(
                    'url_region',
                    value,
                    query.message.chat.id
                )
                user.btn = btn[-1]
            keyboard = alphabet(
                get_urls(
                    'url_region',
                    query.message.chat.id
                ),
                'main_sub_reg'
            )
            keyboard.add(
                telebot.types.InlineKeyboardButton(
                    '<<Назад',
                    callback_data=f'set_region{user.btn}'
                )
            )
            bot.edit_message_text(
                'Выберите первый символ из названия субъекта региона',
                query.message.chat.id,
                query.message.message_id
            )
        elif query.data.startswith('main_sub_reg'):
            if query.data != 'main_sub_reg_back':
                user.btn_sub_reg = query.data[-1]
            url_region = get_urls('url_region', query.message.chat.id)
            user.regions = set_region(user.btn_sub_reg, url_region)
            keyboard = telebot.types.InlineKeyboardMarkup(2)
            lst = [
                telebot.types.InlineKeyboardButton(
                    user.regions[region][0],
                    # Only the first 12 characters of the name fit in the
                    # callback payload; the 'current' branch matches by
                    # this prefix.
                    callback_data=f'current|{user.regions[region][0][:12]}'
                )
                for region in range(len(user.regions))
            ]
            keyboard.add(*lst)
            keyboard.add(
                telebot.types.InlineKeyboardButton(
                    '<<Назад',
                    callback_data='set_sub_reg_back'
                )
            )
            bot.edit_message_text(
                'Выберите место',
                query.message.chat.id,
                query.message.message_id
            )
        elif query.data.startswith('current'):
            # Recover the full entry from the truncated name prefix.
            key = query.data.split("|")[1]
            regions = dict(user.regions)
            sub_reg = [
                (region, regions[region]) for region in regions.keys()
                if region.startswith(key)
            ]
            set_urls(
                'url',
                sub_reg[0][1],
                query.message.chat.id
            )
            keyboard = telebot.types.InlineKeyboardMarkup()
            keyboard.row(
                telebot.types.InlineKeyboardButton(
                    '<<Назад',
                    callback_data='main_sub_reg_back'
                )
            )
            bot.edit_message_text(
                f'Вы выбрали "{sub_reg[0][0]}" локацией по умолчанию.',
                query.message.chat.id,
                query.message.message_id
            )
    except TypeError:
        # Dialog state was lost (e.g. user.regions is None after a restart):
        # fall back to the first keyboard of the dialog.
        keyboard = alphabet(
            URL_REGIONS,
            'set_region'
        )
        bot.edit_message_text(
            'Выберите первый символ из названия региона РФ',
            query.message.chat.id,
            query.message.message_id
        )
    bot.edit_message_reply_markup(
        query.message.chat.id,
        query.message.message_id,
        reply_markup=keyboard
    )
def scraping(url: str):
    """GET `url` and return a BeautifulSoup parse of the response body."""
    # A timeout keeps the bot from hanging forever on a stuck request
    # (the original passed none, i.e. waited indefinitely).
    html = requests.get(url, timeout=10)
    soup = BeautifulSoup(html.text, 'lxml')
    return soup
def set_message(url, change: bool = False):
    """Build the 'current weather' HTML message for the page at `url`.

    change=True prepends an '(Обновлено)' marker, used for in-place edits.
    """
    soup = scraping(url)
    sub_reg = soup.find('h1').text
    area = soup.find('ol', 'breadcrumbs__list')
    region = area.find_all('span', 'breadcrumbs__title')[1].text
    weather_value = soup.find_all('div', 'term__value')
    condition = soup.find('div', 'link__condition day-anchor i-bem').text
    time = soup.find('time')
    current_time = time.text
    tz = time.get('datetime')
    # Hour of day, used to pick day vs night emoji.
    time_of_day = int((tz.strip(". ").split(' ')[1].split(':')[0]))
    weather_value = [item.text for item in weather_value]
    try:
        # The wind cell looks like '<speed> м/с, <direction>'.
        wind = wind_dir[(weather_value[2].split("м/с, ")[1])]
    except IndexError:
        # Calm weather: no direction part present.
        wind = ''
    if change is True:
        update = '<i>(Обновлено)</i>\n'
    else:
        update = ''
    sun_card = soup.find('div', 'sun-card__text-info')
    # Positional scrape: children 2 and 4 of the sun card are assigned to
    # magnetic field and UV index respectively.
    for v, item in enumerate(sun_card):
        if v == 2:
            magnetic_field = item
        elif v == 4:
            uv_index = item
    return (
        f'{sub_reg}\n(<i>{region}</i>)\n'
        f'{update}\n'
        f'{current_time.strip(". ")}(МСК{time_zone(tz)})\n'
        f'текущая температура {"".join([weather_value[0], "°"])}\n'
        f'ощущается как {"".join([weather_value[1], "°"])}\n'
        f'{condition} {get_weather_emoji(condition, time_of_day)}\n'
        f'{dashing_away} {weather_value[2]}'
        f'{wind}\n'
        f'{droplet} {weather_value[3]} '
        f'{barometer} {weather_value[4]}\n'
        f'{uv_index}\n'
        f'{magnetic_field}'
    )
def set_today_message(url, change=None):
    """Build the detailed 'today' HTML forecast message for `url`.

    change=True prepends an '(Обновлено)' marker, used for in-place edits.
    """
    # The detailed forecast lives under /details of the location URL.
    url = url.split('?')[0] + '/details'
    soup = scraping(url)
    area = soup.find('nav', 'breadcrumbs')
    region, city = area.find_all('span', 'breadcrumbs__title')[1:3]
    data = soup.find('div', 'card')
    fields_val = soup.find_all('dd', 'forecast-fields__value')[:2]
    uv_index, magnetic_field = [item.text for item in fields_val]
    today = data.find(
        'h2',
        'forecast-details__title'
    )
    day = today.find('strong').text
    month = today.find('span').text
    table = data.find_all(
        'tr',
        'weather-table__row'
    )
    rows = []
    if change is True:
        update = '<i>(Обновлено)</i>\n'
    else:
        update = ''
    # One table row per part of the day.
    for val in table:
        daypart = val.find(
            'div',
            'weather-table__daypart'
        ).text
        # Temperature forecast for this part of the day, plus the
        # feels-like value as the last element.
        temp = val.find_all(
            'span',
            'temp__value temp__value_with-unit'
        )
        temp = [t.text for t in temp]
        condition = val.find(
            'td',
            'weather-table__body-cell weather-table__body-cell_type_condition'
        ).text
        pressure = val.find(
            'td',
            (
                'weather-table__body-cell weather-table__body-cell_type_air-'
                'pressure'
            )
        ).text
        humidity = val.find(
            'td',
            'weather-table__body-cell weather-table__body-cell_type_humidity'
        ).text
        wind_speed = val.find('span', 'wind-speed').text
        direct = val.find('abbr').text
        rows.append(
            {
                'daypart': daypart,
                'temp': temp,
                'condition': condition,
                'pressure': pressure,
                'humidity': humidity,
                'wind_speed': wind_speed,
                'direct': direct
            }
        )
    # Render each daypart as one text chunk.
    mes = [
        ' '.join
        (
            [
                i["daypart"].capitalize(),
                (
                    i["temp"][0] +
                    '°' +
                    '...' +
                    i["temp"][1] +
                    '°'
                ),
                '\n',
                i["condition"],
                get_weather_emoji(
                    i["condition"],
                    i["daypart"]
                ),
                '\n',
                barometer,
                i["pressure"],
                droplet,
                i["humidity"],
                dashing_away,
                i["wind_speed"],
                i["direct"],
                wind_dir[i["direct"]],
                '\n',
                'ощущается как',
                (i["temp"][2] +
                 '°'),
                '\n\n'
            ]
        )
        for i in rows
    ]
    return (
        f'Cегодня {day} {month}\n'
        f'{city.text}\n<i>({region.text})</i>\n'
        f'{update}\n'
        f'{"".join(mes)}'
        f'УФ-индекс {uv_index}\n'
        f'Магнитное поле {magnetic_field}'
    )
def set_message_10_day(url, change: bool = False):
    """Build the 10-day forecast HTML message for the page at `url`.

    change=True prepends an '(Обновлено)' marker, used for in-place edits.
    """
    soup = scraping(url)
    sub_reg = soup.find(
        'h1',
        class_='title title_level_1 header-title__title'
    ).text
    area = soup.find('ol', 'breadcrumbs__list')
    region = area.find_all('span', 'breadcrumbs__title')[1].text
    ten_day = soup.find_all('div', 'forecast-briefly__name')
    time = soup.find_all('time', class_='forecast-briefly__date')
    t_day = soup.find_all(
        'div',
        class_='temp forecast-briefly__temp forecast-briefly__temp_day'
    )
    t_night = soup.find_all(
        'div',
        class_='temp forecast-briefly__temp forecast-briefly__temp_night'
    )
    condition = soup.find_all('div', class_='forecast-briefly__condition')
    if change is True:
        update = '<i>(Обновлено)</i>\n'
    else:
        update = ''
    # NOTE(review): indices 2..11 presumably skip leading entries of the
    # briefly-list (e.g. past days) -- confirm against the page layout.
    mes = [
        ' '.join(
            [
                ten_day[i].text,
                time[i].text,
                (
                    '\n'
                    + t_day[i].text
                    + '°'
                ),
                (
                    ', '
                    + t_night[i].text
                    + '°'
                )
            ]
        )
        + f'\n {condition[i].text}'
        + f' {get_weather_emoji(condition[i].text)}'
        + '\n\n'
        for i in range(2, 12)
    ]
    return (
        f'{sub_reg}'
        f'\n<i>({region})</i>'
        '\nПрогноз на 10 дней\n'
        f'{update}\n'
        f'{"".join(mes)}'
    )
def set_urls(url, value, chat_id):
    """Persist `value` into column `url` ('url' or 'url_region') for chat_id.

    NOTE(review): the SQL is built by f-string interpolation.  chat_id is a
    Telegram integer and `url` an internal column name, but `value` comes
    from scraped data -- a parameterized query for `value` would be safer;
    confirm the placeholder style of UseDataBase's driver first.
    """
    with UseDataBase(config) as cursor:
        operation = f"""
        UPDATE users_property
        SET {url} = '{value}'
        WHERE chat_id = {chat_id};
        """
        cursor.execute(operation)
def get_urls(url, chat_id):
    """Fetch column `url` ('url' or 'url_region') for the given chat id."""
    with UseDataBase(config) as cursor:
        query = f"""
        SELECT {url}
        FROM users_property
        WHERE chat_id = {chat_id};
        """
        cursor.execute(query)
        rows = cursor.fetchall()
    # Single row, single column.
    return rows[0][0]
def alphabet(url, choosing_region):
    """Build an inline keyboard of the first letters listed on the page at url."""
    # Renamed locals: the original reused 'alphabet', shadowing this function.
    soup = scraping(url)
    headings = soup.find_all(
        'h2',
        'title title_level_2 place-list__letter'
    )
    letters = [heading.get_text() for heading in headings]
    return keyboard_rows(letters, choosing_region)
def keyboard_rows(data, choosing_region):
    """Lay the labels in `data` out as inline buttons, four per row.

    Each button's callback payload is the `choosing_region` prefix followed
    by its label.
    """
    keyboard = telebot.types.InlineKeyboardMarkup(row_width=4)
    buttons = [
        telebot.types.InlineKeyboardButton(
            label,
            callback_data=f'{choosing_region + label}'
        )
        for label in data
    ]
    keyboard.add(*buttons)
    return keyboard
def set_region(letter, url):
    """Return (name, link) pairs for locations at `url` starting with `letter`."""
    locations = get_location(url)
    return [
        (name, link)
        for name, link in locations.items()
        if name.startswith(letter)
    ]
def get_location(url):
    """Scrape `url` and return {location name: absolute link}."""
    soup = scraping(url)
    entries = soup.find_all(
        'li',
        'place-list__item place-list__item_region_yes'
    )
    locations = {}
    for entry in entries:
        # Relative hrefs are turned into absolute yandex.ru links.
        link = 'https://yandex.ru' + entry.find('a').get('href')
        locations[entry.get_text()] = link
    return locations
def time_zone(tz):
    """Convert a '+HH…'-suffixed datetime string to an offset from Moscow time.

    Returns '' for Moscow time itself, '+N' for N hours east of it and
    '-N' for N hours west of it.
    """
    offset = int(tz.split('+')[1][:2]) - 3
    if offset > 0:
        return '+' + str(offset)
    if offset == 0:
        return ''
    # Bug fix: the original did '-' + str(offset), which rendered negative
    # offsets as '--N' because str() of a negative int already has a sign.
    return str(offset)
def button(text: str, url: str = None, callback_data: str = None,
           switch_inline_query: str = None):
    """Build an inline keyboard with one action button and, when a share
    query is given, a 'Поделиться' (share) button next to it."""
    keyboard = telebot.types.InlineKeyboardMarkup()
    action = telebot.types.InlineKeyboardButton(
        text,
        url,
        callback_data
    )
    if not switch_inline_query:
        keyboard.add(action)
        return keyboard
    share = telebot.types.InlineKeyboardButton(
        text='Поделиться',
        switch_inline_query=switch_inline_query
    )
    keyboard.row(action, share)
    return keyboard
def get_weather_emoji(value, hour=None):
    """Map a weather-condition string (and optionally the hour) to an emoji.

    `hour` may be an int hour-of-day or a daypart string; unknown conditions
    are logged to report_emoji.txt and '' is returned.
    """
    value = value.lower()
    try:
        if hour is not None:
            # Yandex treats 00:00-06:00 as night time.
            if isinstance(hour, str):
                if hour == 'ночью':
                    hour = 3  # any night hour works; it only selects the night set
            if isinstance(hour, int):
                if hour < 6:
                    return weather_conditions_night[value]
        return weather_conditions[value]
    except KeyError as err:
        # Record unknown condition strings so the emoji tables can be extended.
        with open('report_emoji.txt', 'a') as file:
            print(f'KeyError get_weather_emoji: {err}', file=file)
        return ''
@bot.inline_handler(func=lambda query: True)
def inline_mode(inline_query):
    """Answer an inline query with three articles: current weather, today's
    forecast and the 10-day forecast.

    The user's saved URL is fetched once and reused for all three articles
    (the original issued three identical database queries).
    """
    url = get_urls('url', inline_query.from_user.id)

    def _article(article_id, title, text, callback_data, share, description,
                 thumb_url):
        # One inline result carrying its own refresh/share keyboard.
        return telebot.types.InlineQueryResultArticle(
            article_id,
            title,
            telebot.types.InputTextMessageContent(text),
            reply_markup=button(
                text='Обновить',
                callback_data=callback_data,
                switch_inline_query=share
            ),
            description=description,
            thumb_url=thumb_url
        )

    current = _article(
        '1', 'Current', set_message(url),
        'update_current', 'Current',
        'Погода сейчас',
        'https://www.clipartkey.com/mpngs/m/273-2739384_weather-icon-heart.png'
    )
    today = _article(
        '2', 'Today', set_today_message(url),
        'update_today', 'Today',
        'Прогноз на сегодня',
        'https://www.clipartkey.com/mpngs/m/273-2739384_weather-icon-heart.png'
    )
    ten_day = _article(
        '3', '10 day', set_message_10_day(url),
        'update_10_day', '10 day',
        'Прогноз на 10 дней',
        'https://unblast.com/wp-content/uploads/2020/05/Weather-Vector-Icons-1536x1152.jpg'
    )
    bot.answer_inline_query(
        inline_query.id,
        [current, today, ten_day]
    )
if __name__ == '__main__':
    # Long-poll Telegram for updates; none_stop keeps polling through errors.
    bot.polling(none_stop=True)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN(nn.Module):
    """Fully connected Q-network: input -> 64 -> 128 -> 64 -> output.

    Hidden layers use ReLU activations; the output layer is linear so the
    network emits raw Q-values, one per action.
    """

    def __init__(self, input_dimension, output_dimension):
        """Build the three hidden layers and the linear output head."""
        super(DQN, self).__init__()
        self.layer_1 = nn.Linear(in_features=input_dimension, out_features=64)
        self.layer_2 = nn.Linear(in_features=64, out_features=128)
        self.layer_3 = nn.Linear(in_features=128, out_features=64)
        self.output_layer = nn.Linear(in_features=64, out_features=output_dimension)

    def forward(self, net_input):
        """Return Q-values for a batch of state vectors."""
        hidden = net_input
        for hidden_layer in (self.layer_1, self.layer_2, self.layer_3):
            hidden = F.relu(hidden_layer(hidden))
        return self.output_layer(hidden)
|
from __future__ import annotations
import pytest
from ufoLib2.objects import Glyph, Layer
def test_init_layer_with_glyphs_dict() -> None:
    """A Layer built from a name->Glyph dict adopts the keys as glyph names."""
    glyph_a = Glyph()
    glyph_b = Glyph()
    layer = Layer("My Layer", {"a": glyph_a, "b": glyph_b})
    assert layer.name == "My Layer"
    for key, glyph in (("a", glyph_a), ("b", glyph_b)):
        assert key in layer
        assert layer[key] is glyph
        assert glyph.name == key
    # A glyph whose existing name conflicts with its dict key is rejected.
    with pytest.raises(
        ValueError, match="glyph has incorrect name: expected 'a', found 'b'"
    ):
        Layer(glyphs={"a": glyph_b})
    # The same Glyph object cannot be registered under two keys.
    with pytest.raises(KeyError, match=".*Glyph .* can't be added twice"):
        Layer(glyphs={"a": glyph_a, "b": glyph_a})
    # Dict values must be Glyph instances.
    with pytest.raises(TypeError, match="Expected Glyph, found int"):
        Layer(glyphs={"a": 1})  # type: ignore
def test_init_layer_with_glyphs_list() -> None:
    """A Layer built from a list keys glyphs by their own names."""
    glyph_a = Glyph("a")
    glyph_b = Glyph("b")
    layer = Layer(glyphs=[glyph_a, glyph_b])
    assert layer["a"] is glyph_a
    assert layer["b"] is glyph_b
    # Passing the same object twice collides on its name.
    with pytest.raises(KeyError, match="glyph named 'a' already exists"):
        Layer(glyphs=[glyph_a, glyph_a])
    # List items must already carry a name.
    unnamed = Glyph()
    with pytest.raises(ValueError, match=".*Glyph .* has no name"):
        Layer(glyphs=[unnamed])
    # Distinct objects with the same name also collide.
    with pytest.raises(KeyError, match="glyph named 'b' already exists"):
        Layer(glyphs=[glyph_a, glyph_b, Glyph("b")])
    # List items must be Glyph instances.
    with pytest.raises(TypeError, match="Expected Glyph, found int"):
        Layer(glyphs=[1])  # type: ignore
def test_addGlyph() -> None:
    """addGlyph stores the object itself and refuses duplicate names."""
    glyph = Glyph("a")
    layer = Layer()
    layer.addGlyph(glyph)
    assert "a" in layer
    assert layer["a"] is glyph
    with pytest.raises(KeyError, match="glyph named 'a' already exists"):
        layer.addGlyph(glyph)
def test_insertGlyph() -> None:
    """insertGlyph stores a copy of the glyph under the given name."""
    template = Glyph()
    pen = template.getPen()
    pen.moveTo((0, 0))
    pen.lineTo((1, 1))
    pen.lineTo((0, 1))
    pen.closePath()
    layer = Layer()
    layer.insertGlyph(template, "a")
    assert "a" in layer
    assert layer["a"].name == "a"
    assert layer["a"].contours == template.contours
    # The stored glyph is a copy, not the original object.
    assert layer["a"] is not template
    layer.insertGlyph(template, "b")
    assert "b" in layer
    assert layer["b"].name == "b"
    assert layer["b"].contours == layer["a"].contours
    assert layer["b"] is not layer["a"]
    assert layer["b"] is not template
    # The source glyph itself is left untouched (still unnamed).
    assert template.name is None
    with pytest.raises(KeyError, match="glyph named 'a' already exists"):
        layer.insertGlyph(template, "a", overwrite=False)
    # Without an explicit name, an unnamed glyph cannot be inserted.
    with pytest.raises(ValueError, match=".*Glyph .* has no name; can't add it"):
        layer.insertGlyph(template)
def test_newGlyph() -> None:
    """newGlyph creates, registers and returns a fresh glyph."""
    layer = Layer()
    created = layer.newGlyph("a")
    assert "a" in layer
    assert layer["a"] is created
    with pytest.raises(KeyError, match="glyph named 'a' already exists"):
        layer.newGlyph("a")
def test_renameGlyph() -> None:
    """renameGlyph updates both the mapping key and the glyph's own name."""
    glyph = Glyph()
    layer = Layer(glyphs={"a": glyph})
    assert glyph.name == "a"
    # Renaming to the current name is a no-op.
    layer.renameGlyph("a", "a")
    assert glyph.name == "a"
    layer.renameGlyph("a", "b")
    assert glyph.name == "b"
    layer.insertGlyph(glyph, "a")
    # Renaming onto an occupied name is rejected.
    with pytest.raises(KeyError, match="target glyph named 'a' already exists"):
        layer.renameGlyph("b", "a")
|
from typing import Callable, Union, List
from .errors import CredentialError, TokenExpired, QRExpiredError
from .utils import password_fixer
import json
import os
import threading
import requests
import websocket
class PyTeleBirr:
    """Client for the TeleBirr mobile-money HTTP/WebSocket API.

    Logs in on construction and keeps the session token in ``self._token``
    and in the ``amm-token`` request header. All request/response bodies
    are JSON.

    :param phone_no: wallet phone number (msisdn)
    :param passwd: 6-digit wallet PIN
    :param device_id: device/session identifier issued by the app
    :raises CredentialError: if the PIN is shorter than 6 digits or the
        login is rejected by the server
    """
    def __init__(
            self,
            phone_no: Union[int, str],
            passwd: Union[int, str],
            device_id: str
    ):
        if len(str(passwd)) < 6:
            raise CredentialError(
                "Password Must Be 6 Digit"
            )
        # Headers for the unauthenticated login call.
        self._headers = {
            'Content-Type': 'application/json; charset=utf-8',
            'Host': 'app.ethiomobilemoney.et:2121',
            'Connection': 'Keep-Alive',
        }
        # Headers for the third-party QR rendering service (qrcode-monkey).
        self._qr_header = {
            'authority': 'api.qrcode-monkey.com',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/91.0.4472.164 Safari/537.36',
            'content-type': 'text/plain;charset=UTF-8',
            'accept': '*/*',
            'origin': 'https://www.qrcode-monkey.com',
            'sec-fetch-site': 'same-site',
            'sec-fetch-mode': 'cors',
            'referer': 'https://www.qrcode-monkey.com/',
        }
        self._passwd = passwd
        self._phone = phone_no
        self._device_id = device_id
        self._tele_url = "https://app.ethiomobilemoney.et:2121/{}"
        self._r = requests.Session()
        self._base64_pass = password_fixer(self._passwd)
        self._headers['Content-Length'] = str(len(self._base64_pass))
        data = json.dumps({
            "code": None,
            "mid": str(self._phone),
            "password": self._base64_pass,
            "sid": self._device_id,
            "language": "en"
        })
        _res = self._r.post(
            self._tele_url.format("service-information/safelogin"),
            data=data,
            headers=self._headers
        )
        payload = _res.json()
        if payload['code'] != 200:
            raise CredentialError(
                "[ Error ] : Password, Phone Number or Device id is incorrect"
            )
        self._token = payload['data']['token']
        # Authenticated header attached to every subsequent API call.
        self._header = {
            'amm-token': self._token,
            'Content-Type': 'application/json; charset=utf-8',
            'Host': 'app.ethiomobilemoney.et:2121',
            'Connection': 'Keep-Alive',
            'Accept-Encoding': 'gzip'
        }
    def _raise_if_token_expired(self, payload) -> None:
        """Raise TokenExpired when the API reports auth failure (code 401)."""
        if payload.get("code") in [401]:
            raise TokenExpired(
                "[ Error ] : Token Expired"
            )
    def get_balance(self) -> object:
        """Return the wallet balance (the response's 'data' object).

        :raises TokenExpired: when the session token is no longer valid
        """
        url = self._tele_url.format(
            "service-transaction/getBalance"
        )
        res = self._r.post(
            url,
            data='{}',
            headers=self._header
        )
        payload = res.json()
        self._raise_if_token_expired(payload)
        return payload['data']
    def generate_qrcode(
            self,
            amount: Union[str, int] = '',
            size: Union[str, int] = 350,
            bg_color: str = "ffffff",
            logo: str = "e8cb9ae2340c568713010178b6834ad9edced49f.png"
    ) -> str:
        """Create a C2C payment QR code and render it to qr/qr.png.

        :param amount: optional preset amount embedded in the QR content
        :param size: rendered image size in pixels
        :param bg_color: hex background color (no leading '#')
        :param logo: qrcode-monkey logo asset name
        :return: path of the written PNG ("qr/qr.png")
        :raises TokenExpired: when the session token is no longer valid
        """
        url = self._tele_url.format(
            "service-transfe/produceC2CQRCode"
        )
        res = self._r.post(
            url,
            data=json.dumps(
                {
                    "money": amount,
                    "content": ""
                }
            ),
            headers=self._header
        )
        payload = res.json()
        self._raise_if_token_expired(payload)
        # Render the QR content through qrcode-monkey's custom-style endpoint.
        _response = requests.get(
            'https://api.qrcode-monkey.com//qr/custom?download=true&file=png&data=' + str(
                payload['data'][
                    'content']) + f'&size={size}&config=%7B%22body%22%3A%22mosaic%22%2C%22eye%22%3A%22frame1%22%2C'
                                  '%22eyeBall '
                                  '%22%3A%22ball15%22%2C%22erf1%22%3A%5B%22fh%22%5D%2C%22erf2%22%3A%5B%5D%2C%22erf3'
                                  '%22%3A '
                                  '%5B%22fh%22%2C%22fv%22%5D%2C%22brf1%22%3A%5B%5D%2C%22brf2%22%3A%5B%5D%2C%22brf3%22'
                                  '%3A '
                                  f'%5B%5D%2C%22bodyColor%22%3A%22%23000000%22%2C%22bgColor%22%3A%22%23{bg_color}%22%2C'
                                  '%22eye1Color%22%3A%22%23000000%22%2C%22eye2Color%22%3A%22%23000000%22%2C%22eye3Color'
                                  '%22%3A%22%23000000%22%2C%22eyeBall1Color%22%3A%22%23000000%22%2C%22eyeBall2Color'
                                  '%22%3A '
                                  '%22%23000000%22%2C%22eyeBall3Color%22%3A%22%23000000%22%2C%22gradientColor1%22%3A%22'
                                  '%23CC3873%22%2C%22gradientColor2%22%3A%22%235302BD%22%2C%22gradientType%22%3A'
                                  '%22linear '
                                  '%22%2C%22gradientOnEyes%22%3A%22true%22%2C%22logo%22%3A'
                                  f'%22{logo}%22%2C%22logoMode%22%3A%22clean%22'
                                  '%7D',
            headers=self._qr_header
        )
        # Previously the write was duplicated behind an if/else around
        # os.mkdir; makedirs(exist_ok=True) covers both cases.
        os.makedirs("qr", exist_ok=True)
        with open("qr/qr.png", "wb") as f:
            f.write(_response.content)
        return "qr/qr.png"
    def refresh_token(self):
        """Re-login with the stored credentials to obtain a fresh token.

        :raises TokenExpired: if the re-login is rejected
        """
        _data = json.dumps(
            {
                "code": None,
                "mid": str(self._phone),
                "password": self._base64_pass,
                "sid": self._device_id,
                "language": "en"
            }
        )
        _res = self._r.post(
            self._tele_url.format(
                "service-information/safelogin"
            ),
            data=_data,
            headers=self._header
        )
        payload = _res.json()
        if payload.get("code") in [401, 1000] or _res.status_code != 200:
            raise TokenExpired(
                "[ Error ] : Password, Phone Number or Device id is incorrect"
            )
        self._token = payload['data']['token']
        # BUG FIX: keep the auth header in sync with the refreshed token.
        # Previously only self._token was updated and every subsequent
        # request still carried the stale 'amm-token' value.
        self._header['amm-token'] = self._token
        print("[ Token Refreshed ]")
    def on_payment(
            self,
            on_payment: Callable,
            on_error: Callable = lambda a: print("Socket error"),
            on_open: Callable = lambda a: print("Socket Connected")
    ) -> None:
        """Listen on the TeleBirr websocket, calling ``on_payment`` per message.

        Runs the socket in a daemon thread and reconnects when it closes.
        when payment received on_msg will be called
        notice: this method only works when sending payments via qr code
        for phone number payment or ussd payment use by tx id
        """
        def _on_message(_, msg):
            on_payment(msg)
        # BUG FIX: websocket-client invokes on_close with the ws instance
        # (and, in newer versions, close status/message). The original
        # zero-argument callback raised TypeError, so the reconnect below
        # never ran. Accept and ignore whatever arguments are passed.
        def _on_closed(*_args):
            print("[ Socket Restarted ]")
            self.on_payment(on_payment)
        _ws = websocket.WebSocketApp(
            self._tele_url.format(
                f"websocket?token={self._token}"
            ).replace("https", "wss"),
            on_open=on_open,
            on_message=_on_message,
            on_error=on_error,
            on_close=_on_closed,
            header={
                'Origin': 'http://localhost',
                'Sec-WebSocket-Key': 'aZwQ6W5X+KKAu9jzEdw8Mw==',
                'Host': 'app.ethiomobilemoney.et:2121',
                'User-Agent': 'okhttp/3.12.11'
            }
        )
        print("[ Thread Started ]")
        _tr = threading.Thread(
            target=_ws.run_forever,
            args=()
        )
        _tr.daemon = True
        _tr.start()
    def check_tx(
            self,
            tx_id: str
    ) -> Union[bool, dict]:
        """Check whether a transaction id is valid.

        :param tx_id: receipt number of the transaction
        :return: the transaction record payload, or False if unknown
        :raises TokenExpired: when the session token is no longer valid
        """
        _url = self._tele_url.format(
            "service-transaction/cusTransactionRecordDetail"
        )
        _res = self._r.post(
            _url,
            data=json.dumps(
                {
                    "receiptNumber": tx_id
                }
            ),
            headers=self._header
        )
        payload = _res.json()
        self._raise_if_token_expired(payload)
        if payload.get("code") in [1000, 401]:
            return False
        return payload
    def is_my_tx(
            self,
            tx_id: str
    ) -> bool:
        """Check whether ``tx_id`` is an incoming transfer on this wallet.

        The record endpoint can see any transaction, so this additionally
        requires the type to be "Transfer" and the amount to be credited
        (contains '+').
        """
        _res = self._r.post(
            self._tele_url.format(
                "service-transaction/cusFourTransactionRecord"
            ),
            data=json.dumps(
                {
                    "startDateTime": "20210622",
                    "endDateTime": "",
                    "type": "1"
                }
            ),
            headers=self._header
        )
        payload = _res.json()
        self._raise_if_token_expired(payload)
        # NOTE(review): iterating the decoded JSON yields dict keys when the
        # body is an object; the list guard below only matches if iteration
        # actually produces record lists — verify the response shape.
        for _tx in payload:
            if isinstance(_tx, list):
                for _t in _tx:
                    if _t.get("receiptNumber") == tx_id:
                        if _t.get("resTransactionType") == "Transfer":
                            if "+" in _t.get("resAmount"):
                                return True
        return False
    def get_packages(
            self
    ) -> List[dict]:
        """Get all available top-up packages.

        :returns: lists of dict
        :raises TokenExpired: when the session token is no longer valid
        """
        _res = self._r.post(
            self._tele_url.format(
                "service-topup/productSettings"
            ),
            headers=self._header,
            data=json.dumps(
                {
                    "category": "PACKAGE"
                }
            )
        )
        payload = _res.json()
        self._raise_if_token_expired(payload)
        return payload['data']
    def scan_qr(
            self,
            content: Union[str, int] = None
    ):
        """Resolve a scanned QR code into the receiver's data.

        you can get the receiver phone number by qr code :0
        scan the qr code and pass the content to content param
        :param content: receiver content number scan qr code to get this
        :return: dict
        :raises TokenExpired: when the session token is no longer valid
        :raises QRExpiredError: when the QR code has expired
        """
        _res = self._r.post(
            self._tele_url.format(
                'service-transfe/scanReceiveC2CQRCode'
            ),
            headers=self._header,
            data=json.dumps(
                {
                    "content": str(content)
                }
            )
        )
        payload = _res.json()
        self._raise_if_token_expired(payload)
        if payload['data']:
            return payload['data']
        raise QRExpiredError(
            "[ ERROR ] QR expired"
        )
    def _get_umc_session_id(
            self,
            money: Union[str, int],
            phone: Union[str, int],
            content: Union[str, int]
    ) -> dict:
        """Open a transfer session; returns its descriptor (incl. umcSessionId)."""
        _data = json.dumps(
            {"money": str(money), "msisdn": str(phone), "pin": password_fixer(self._passwd), "content": str(content)})
        print(_data)
        self._header['Content-Length'] = str(len(_data))
        _res = self._r.post(
            self._tele_url.format(
                'service-transfe/getTransferInfo'
            ),
            headers=self._header,
            data=_data
        )
        payload = _res.json()
        self._raise_if_token_expired(payload)
        print(_res.text)
        return payload['data']
    def send_payment(
            self,
            amount: Union[str, int],
            phone: Union[str, int],
            content: Union[str, int]
    ):
        """Send a C2C payment: open a transfer session, then confirm it.

        :param amount: amount of money to transfer
        :param phone: receiver phone number
        :param content: transfer content (e.g. from a scanned QR code)
        :raises TokenExpired: when the session token is no longer valid
        """
        umc_id = self._get_umc_session_id(
            phone=phone,
            money=amount,
            content=content
        )['umcSessionId']
        print(umc_id)
        _res = self._r.post(
            self._tele_url.format(
                'service-transfe/syncTransferC2C'
            ),
            headers=self._header,
            data=json.dumps(
                {
                    "confirmationAction": "1",
                    "umcSessionId": umc_id,
                    "flag": "",
                    "mid": phone
                }
            )
        )
        payload = _res.json()
        self._raise_if_token_expired(payload)
        print(_res.text)
        return payload['data']
    def get_token(self):
        """Return the current session token."""
        return self._token
|
# coding=utf-8
"""
"""
import os
import unittest
import shutil
from md_utils.rename_files import main
from md_utils.md_common import (capture_stdout, capture_stderr, silent_remove)
import logging
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# When DEBUG logging is enabled, keep renamed files around for inspection
# instead of deleting them in the tests' cleanup.
DISABLE_REMOVE = logger.isEnabledFor(logging.DEBUG)
__author__ = 'hmayes'
# Directories #
DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
SUB_DATA_DIR = os.path.join(DATA_DIR, 'rename_files')
# Files #
# Template file copied to create each test fixture file.
SMALL_FILE = os.path.join(SUB_DATA_DIR, 'small_file.txt')
# test data #
TEST_FILE_NAMES = ['has space.txt', 'has two spaces.txt', 'now!exclaim.txt']
# Expected names after a run with default patterns (spaces removed).
REPLACED_FILE_NAMES1 = ['hasspace.txt', 'hastwospaces.txt', 'now!exclaim.txt']
# Expected names after a run with -p "!" -n "_" ('!' replaced by '_').
REPLACED_FILE_NAMES2 = ['has space.txt', 'has two spaces.txt', 'now_exclaim.txt']
# REPLACED_FILE_NAMES3 = ['has_space.txt', 'has_two_spaces.txt', 'now!exclaim.txt']
def make_files(fname_list):
    """Recreate the given fixture files by copying the small template file.

    Files must be made fresh each time because the program under test
    moves (renames) them.
    @param fname_list: list of file names without directory name
    """
    for name in fname_list:
        shutil.copyfile(SMALL_FILE, os.path.join(SUB_DATA_DIR, name))
def add_sub_dir(fname_list, abs_dir):
    """Prefix each bare file name with the given absolute directory.

    @param fname_list: list of file names without directory name
    @param abs_dir: absolute directory name
    @return: a list of file names with the specified absolute directory
    """
    return [os.path.join(abs_dir, fname) for fname in fname_list]
def count_files(fname_list):
    """Count how many of the named files currently exist on disk.

    @param fname_list: list of file names
    @return: the number of entries in fname_list that are existing files
    """
    return sum(1 for fname in fname_list if os.path.isfile(fname))
class TestRenameNoOutput(unittest.TestCase):
    """Runs that only print usage/errors and never rename anything."""
    def testHelp(self):
        # '-h' prints usage on stdout and nothing on stderr.
        args = ['-h']
        if logger.isEnabledFor(logging.DEBUG):
            main(args)
        with capture_stderr(main, args) as output:
            self.assertFalse(output)
        with capture_stdout(main, args) as output:
            self.assertTrue("optional arguments" in output)
    def testInvalidArg(self):
        # An unknown flag is reported on stderr by argparse.
        args = ['-@']
        if logger.isEnabledFor(logging.DEBUG):
            main(args)
        with capture_stderr(main, args) as output:
            self.assertTrue("unrecognized arguments" in output)
class TestRename(unittest.TestCase):
    # End-to-end runs of rename_files.main against fixture files.
    def testNoFilesRenamed(self):
        # With no args, nothing matching the default pattern is found.
        test_input = []
        if logger.isEnabledFor(logging.DEBUG):
            main(test_input)
        with capture_stdout(main, test_input) as output:
            self.assertTrue("Found and renamed 0 files" in output)
    def testDefaultPatterns(self):
        # Default pattern removes spaces from the two space-containing names.
        make_files(TEST_FILE_NAMES)
        test_input = ["-d", SUB_DATA_DIR]
        initial_fnames = add_sub_dir(TEST_FILE_NAMES, SUB_DATA_DIR)
        expected_fnames = add_sub_dir(REPLACED_FILE_NAMES1, SUB_DATA_DIR)
        try:
            if logger.isEnabledFor(logging.DEBUG):
                main(test_input)
            # need to make again for capturing std out
            make_files(TEST_FILE_NAMES)
            with capture_stdout(main, test_input) as output:
                self.assertTrue("Found and renamed 2 files" in output)
            # NOTE(review): assertTrue(x, msg) treats the second argument as
            # a failure message, not an expected value, so these two lines
            # only check truthiness. assertEqual was probably intended —
            # confirm the intended counts before changing.
            self.assertTrue(count_files(initial_fnames), 2)
            self.assertTrue(count_files(expected_fnames), 3)
        finally:
            for fname in expected_fnames:
                silent_remove(fname, disable=DISABLE_REMOVE)
    def testAltPattern(self):
        # '-p "!" -n "_"' renames only the exclamation-mark file.
        make_files(TEST_FILE_NAMES)
        test_input = ["-d", SUB_DATA_DIR, "-p", "!", "-n", "_"]
        initial_fnames = add_sub_dir(TEST_FILE_NAMES, SUB_DATA_DIR)
        expected_fnames = add_sub_dir(REPLACED_FILE_NAMES2, SUB_DATA_DIR)
        try:
            if logger.isEnabledFor(logging.DEBUG):
                main(test_input)
            # need to make again for capturing std out
            make_files(TEST_FILE_NAMES)
            with capture_stdout(main, test_input) as output:
                self.assertTrue("Found and renamed 1 files" in output)
            # NOTE(review): same assertTrue(x, msg) misuse as above.
            self.assertTrue(count_files(initial_fnames), 1)
            self.assertTrue(count_files(expected_fnames), 3)
        finally:
            for fname in expected_fnames:
                silent_remove(fname, disable=DISABLE_REMOVE)
|
"""
Generic templates for different types of Encryption Schemes
"""
__version__ = "2.0.2"
|
def test_epochs_mapping(client):
    """Epoch endpoint returns ordered epochs with individuals attached."""
    response = client.get('api/sandbox/epoch/scoring').json
    first_epoch, second_epoch = response[0], response[1]
    assert first_epoch['epoch_num'] == 1
    assert second_epoch['individual_id'] is not None
    assert second_epoch['epoch_num'] == 2
def test_case_params(client):
    """Params endpoint reports the dataset, metric and task identifiers."""
    params = client.get('api/sandbox/params/scoring').json
    assert params['dataset_name'] == 'scoring'
    assert params['metric_id'] == 'roc_auc'
    assert params['task_id'] == 'classification'
|
import numpy as np
import sys
import gc
import chroma.api as api
if api.is_gpu_api_cuda():
import pycuda.driver as cuda
from pycuda import gpuarray as ga
elif api.is_gpu_api_opencl():
import pyopencl as cl
#from pyopencl.array import Array as ga
import pyopencl.array as ga
from chroma.tools import profile_if_possible
from chroma import event
from chroma.gpu.tools import get_module, api_options, chunk_iterator, to_float3, copy_to_float3
from chroma.gpu.gpufuncs import GPUFuncs
import time
class GPUPhotons(object):
def __init__(self, photons, ncopies=1, cl_context=None):
"""Load ``photons`` onto the GPU, replicating as requested.
Args:
- photons: chroma.Event.Photons
Photon state information to load onto GPU
- ncopies: int, *optional*
Number of times to replicate the photons
on the GPU. This is used if you want
to propagate the same event many times,
for example in a likelihood calculation.
The amount of GPU storage will be proportionally
larger if ncopies > 1, so be careful.
"""
nphotons = len(photons)
# Allocate GPU memory for photon info and push to device
if api.is_gpu_api_cuda():
self.pos = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
self.dir = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
self.pol = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
self.wavelengths = ga.empty(shape=nphotons*ncopies, dtype=np.float32)
self.t = ga.empty(shape=nphotons*ncopies, dtype=np.float32)
self.last_hit_triangles = ga.empty(shape=nphotons*ncopies, dtype=np.int32)
self.flags = ga.empty(shape=nphotons*ncopies, dtype=np.uint32)
self.weights = ga.empty(shape=nphotons*ncopies, dtype=np.float32)
self.current_node_index = ga.zeros( shape=nphotons*ncopies, dtype=np.uint32 ) # deprecated
self.requested_workcode = ga.empty( shape=nphotons*ncopies, dtype=np.uint32 ) # deprecated
elif api.is_gpu_api_opencl():
queue = cl.CommandQueue( cl_context )
self.pos = ga.empty(queue, shape=nphotons*ncopies, dtype=ga.vec.float3)
self.dir = ga.empty(queue, shape=nphotons*ncopies, dtype=ga.vec.float3)
self.pol = ga.empty(queue, shape=nphotons*ncopies, dtype=ga.vec.float3)
self.wavelengths = ga.empty(queue, shape=nphotons*ncopies, dtype=np.float32)
self.t = ga.empty(queue, shape=nphotons*ncopies, dtype=np.float32)
self.last_hit_triangles = ga.empty(queue, shape=nphotons*ncopies, dtype=np.int32)
self.flags = ga.empty(queue, shape=nphotons*ncopies, dtype=np.uint32)
self.weights = ga.empty(queue, shape=nphotons*ncopies, dtype=np.float32)
self.current_node_index = ga.zeros( queue, shape=nphotons*ncopies, dtype=np.uint32 ) # deprecated
self.requested_workcode = ga.empty( queue, shape=nphotons*ncopies, dtype=np.uint32 ) # deprecated
# Assign the provided photons to the beginning (possibly
# the entire array if ncopies is 1
self.pos[:nphotons].set(to_float3(photons.pos))
self.dir[:nphotons].set(to_float3(photons.dir))
self.pol[:nphotons].set(to_float3(photons.pol))
self.wavelengths[:nphotons].set(photons.wavelengths.astype(np.float32))
self.t[:nphotons].set(photons.t.astype(np.float32))
self.last_hit_triangles[:nphotons].set(photons.last_hit_triangles.astype(np.int32))
self.flags[:nphotons].set(photons.flags.astype(np.uint32))
self.weights[:nphotons].set(photons.weights.astype(np.float32))
if api.is_gpu_api_cuda():
self.module = get_module('propagate.cu', options=api_options, include_source_directory=True)
elif api.is_gpu_api_opencl():
self.module = get_module('propagate.cl', cl_context, options=api_options, include_source_directory=True)
# define the texture references
self.define_texture_references()
# get kernel functions
self.gpu_funcs = GPUFuncs(self.module)
# Replicate the photons to the rest of the slots if needed
if ncopies > 1:
max_blocks = 1024
nthreads_per_block = 64
for first_photon, photons_this_round, blocks in \
chunk_iterator(nphotons, nthreads_per_block, max_blocks):
self.gpu_funcs.photon_duplicate(np.int32(first_photon), np.int32(photons_this_round),
self.pos, self.dir, self.wavelengths, self.pol, self.t,
self.flags, self.last_hit_triangles, self.weights,
np.int32(ncopies-1),
np.int32(nphotons),
block=(nthreads_per_block,1,1), grid=(blocks, 1))
# Save the duplication information for the iterate_copies() method
self.true_nphotons = nphotons
self.ncopies = ncopies
def define_texture_references( self, module=None ):
# unbound texture references declared for use with propagate
if module==None:
module = self.module
if api.is_gpu_api_cuda():
self.node_texture_ref = module.get_texref( "nodevec_tex_ref" )
self.node_texture_ref.set_format( cuda.array_format.UNSIGNED_INT32, 4 )
self.extra_node_texture_ref = module.get_texref( "extra_node_tex_ref" )
self.extra_node_texture_ref.set_format( cuda.array_format.UNSIGNED_INT32, 4 )
self.vertices_texture_ref = module.get_texref( "verticesvec_tex_ref" )
self.vertices_texture_ref.set_format( cuda.array_format.FLOAT, 4 )
self.triangles_texture_ref = module.get_texref( "trianglesvec_tex_ref" )
self.triangles_texture_ref.set_format( cuda.array_format.UNSIGNED_INT32, 4 )
self.node_texture_ref_bound = False
elif api.is_gpu_api_opencl():
# texture usage not used at the moment
pass
def get(self):
ncols = 3
if api.is_gpu_api_opencl():
ncols = 4 # must include padding
pos = self.pos.get().view(np.float32).reshape((len(self.pos),ncols))
dir = self.dir.get().view(np.float32).reshape((len(self.dir),ncols))
pol = self.pol.get().view(np.float32).reshape((len(self.pol),ncols))
wavelengths = self.wavelengths.get()
t = self.t.get()
last_hit_triangles = self.last_hit_triangles.get()
flags = self.flags.get()
weights = self.weights.get()
return event.Photons(pos, dir, pol, wavelengths, t, last_hit_triangles, flags, weights)
def iterate_copies(self):
'''Returns an iterator that yields GPUPhotonsSlice objects
corresponding to the event copies stored in ``self``.'''
for i in xrange(self.ncopies):
window = slice(self.true_nphotons*i, self.true_nphotons*(i+1))
yield GPUPhotonsSlice(pos=self.pos[window],
dir=self.dir[window],
pol=self.pol[window],
wavelengths=self.wavelengths[window],
t=self.t[window],
last_hit_triangles=self.last_hit_triangles[window],
flags=self.flags[window],
weights=self.weights[window])
@profile_if_possible
def propagate(self, gpu_geometry, rng_states, nthreads_per_block=64,
max_blocks=1024, max_steps=10, use_weights=False,
scatter_first=0, cl_context=None):
"""Propagate photons on GPU to termination or max_steps, whichever
comes first.
May be called repeatedly without reloading photon information if
single-stepping through photon history.
..warning::
`rng_states` must have at least `nthreads_per_block`*`max_blocks`
number of curandStates.
"""
nphotons = self.pos.size
# bind node texture reference
if api.is_gpu_api_cuda() and not self.node_texture_ref_bound:
# we have to unroll, as pycuda doesn't seem to support vector times right now for binding
self.unrolled_nodes = ga.to_gpu( gpu_geometry.nodes.get().ravel().view( np.uint32 ) )
self.unrolled_extra_nodes = ga.to_gpu( gpu_geometry.extra_nodes.ravel().view( np.uint32 ) )
self.unrolled_triangles = ga.to_gpu( gpu_geometry.triangles.get().ravel().view( np.uint32 ) )
self.unrolled_triangles4 = ga.to_gpu( gpu_geometry.triangles4.ravel().view( np.uint32 ) )
self.unrolled_vertices = ga.to_gpu( gpu_geometry.vertices.get().ravel().view( np.float32 ) )
self.unrolled_vertices4 = ga.to_gpu( gpu_geometry.vertices4.ravel().view( np.float32 ) )
self.node_texture_ref.set_address( self.unrolled_nodes.gpudata, self.unrolled_nodes.nbytes )
self.extra_node_texture_ref.set_address( self.unrolled_extra_nodes.gpudata, self.unrolled_extra_nodes.nbytes )
#self.unrolled_nodes.bind_to_texref_ext( self.node_texture_ref )
#self.unrolled_extra_nodes.bind_to_texref_ext( self.extra_node_texture_ref )
#self.unrolled_triangles.bind_to_texref_ext( self.triangles_texture_ref )
self.triangles_texture_ref.set_address( self.unrolled_triangles4.gpudata, self.unrolled_triangles4.nbytes )
#self.unrolled_vertices.bind_to_texref_ext( self.vertices_texture_ref )
self.vertices_texture_ref.set_address( self.unrolled_vertices4.gpudata, self.unrolled_vertices4.nbytes )
print "[BOUND TO TEXTURE MEMORY]"
print "Nodes: ",self.unrolled_nodes.nbytes/1.0e3," kbytes"
print "Extra nodes: ",self.unrolled_extra_nodes.nbytes/1.0e3," kbytes"
print "Triangles: ",self.unrolled_triangles4.nbytes/1.0e3," kbytes"
print "Vertices: ",self.unrolled_vertices4.nbytes/1.0e3," kbytes"
print "Total: ",(self.unrolled_nodes.nbytes+self.unrolled_extra_nodes.nbytes+self.unrolled_triangles4.nbytes+self.unrolled_vertices4.nbytes)/1.0e3,"kbytes"
self.node_texture_ref_bound = True
# setup queue
maxqueue = nphotons
step = 0
input_queue = np.empty(shape=maxqueue+1, dtype=np.uint32)
input_queue[0] = 0
# Order photons initially in the queue to put the clones next to each other
for copy in xrange(self.ncopies):
input_queue[1+copy::self.ncopies] = np.arange(self.true_nphotons, dtype=np.uint32) + copy * self.true_nphotons
if api.is_gpu_api_cuda():
input_queue_gpu = ga.to_gpu(input_queue)
elif api.is_gpu_api_opencl():
comqueue = cl.CommandQueue(cl_context)
input_queue_gpu = ga.to_device(comqueue,input_queue[1:]) # why the offset?
output_queue = np.zeros(shape=maxqueue+1, dtype=np.uint32)
output_queue[0] = 1
if api.is_gpu_api_cuda():
output_queue_gpu = ga.to_gpu(output_queue)
elif api.is_gpu_api_opencl():
output_queue_gpu = ga.to_device(comqueue,output_queue)
if use_weights:
iuse_weights = 1
else:
iuse_weights = 0
adapt_factor = 1.0
start_prop = time.time()
while step < max_steps:
# Just finish the rest of the steps if the # of photons is low
#if nphotons < nthreads_per_block * 16 * 8 or use_weights:
# nsteps = max_steps - step
#else:
# nsteps = 1
nsteps = 1
start_step = time.time()
for first_photon, photons_this_round, blocks in \
chunk_iterator(nphotons, nthreads_per_block, max( int(adapt_factor*max_blocks), 1 )):
#print nphotons, nthreads_per_block, max_blocks," : ",first_photon, photons_this_round, blocks, adapt_factor
start_chunk = time.time()
if api.is_gpu_api_cuda():
self.gpu_funcs.propagate(np.int32(first_photon), np.int32(photons_this_round),
input_queue_gpu[1:], output_queue_gpu, rng_states,
self.pos, self.dir, self.wavelengths, self.pol, self.t, self.flags, self.last_hit_triangles,
self.weights, np.int32(nsteps), np.int32(iuse_weights), np.int32(scatter_first),
gpu_geometry.gpudata, block=(nthreads_per_block,1,1), grid=(blocks, 1))
#cuda.Context.get_current().synchronize()
elif api.is_gpu_api_opencl():
self.gpu_funcs.propagate( comqueue, (photons_this_round,1,1), None,
np.int32(first_photon), np.int32(photons_this_round),
input_queue_gpu.data, output_queue_gpu.data,
rng_states.data,
self.pos.data, self.dir.data, self.wavelengths.data, self.pol.data, self.t.data,
self.flags.data, self.last_hit_triangles.data, self.weights.data,
np.int32(nsteps), np.int32(iuse_weights), np.int32(scatter_first),
gpu_geometry.world_scale, gpu_geometry.world_origin.data, np.int32(len(gpu_geometry.nodes)),
gpu_geometry.material_data['n'], gpu_geometry.material_data['step'], gpu_geometry.material_data["wavelength0"],
gpu_geometry.vertices.data, gpu_geometry.triangles.data,
gpu_geometry.material_codes.data, gpu_geometry.colors.data,
gpu_geometry.nodes.data, gpu_geometry.extra_nodes.data,
gpu_geometry.material_data["nmaterials"],
gpu_geometry.material_data['refractive_index'].data, gpu_geometry.material_data['absorption_length'].data,
gpu_geometry.material_data['scattering_length'].data,
gpu_geometry.material_data['reemission_prob'].data, gpu_geometry.material_data['reemission_cdf'].data,
gpu_geometry.surface_data['nsurfaces'],
gpu_geometry.surface_data['detect'].data, gpu_geometry.surface_data['absorb'].data, gpu_geometry.surface_data['reemit'].data,
gpu_geometry.surface_data['reflect_diffuse'].data, gpu_geometry.surface_data['reflect_specular'].data,
gpu_geometry.surface_data['eta'].data, gpu_geometry.surface_data['k'].data, gpu_geometry.surface_data['reemission_cdf'].data,
gpu_geometry.surface_data['model'].data, gpu_geometry.surface_data['transmissive'].data, gpu_geometry.surface_data['thickness'].data,
gpu_geometry.surface_data['nplanes'].data, gpu_geometry.surface_data['wire_diameter'].data, gpu_geometry.surface_data['wire_pitch'].data,
g_times_l=True ).wait()
end_chunk = time.time()
chunk_time = end_chunk-start_chunk
#print "chunk time: ",chunk_time
#if chunk_time>2.5:
# adapt_factor *= 0.5
step += nsteps
scatter_first = 0 # Only allow non-zero in first pass
end_step = time.time()
#print "step time: ",end_step-start_step
if step < max_steps:
start_requeue = time.time()
#print "reset photon queues"
if api.is_gpu_api_cuda():
cuda.Context.get_current().synchronize() # ensure all threads done
#temp = input_queue_gpu
#input_queue_gpu = output_queue_gpu
#output_queue_gpu = temp
# Assign with a numpy array of length 1 to silence
# warning from PyCUDA about setting array with different strides/storage orders.
#output_queue_gpu[:1].set(np.ones(shape=1, dtype=np.uint32))
#nphotons = input_queue_gpu[:1].get()[0] - 1
# new style
output_queue_gpu.get( output_queue )
nphotons = output_queue[0]-1
input_queue_gpu.set( output_queue )
output_queue_gpu[:1].set(np.ones(shape=1,dtype=np.uint32))
elif api.is_gpu_api_opencl():
temp_out = output_queue_gpu.get()
nphotons = temp_out[0]
input_queue_gpu.set( temp_out[1:], queue=comqueue ) # set the input queue to have index of photons still need to be run
output_queue_gpu[:1].set( np.ones(shape=1,dtype=np.uint32), queue=comqueue ) # reset first instance to be one
end_requeue = time.time()
#print "re-queue time (nphotons=",nphotons"): ",end_requeue-start_requeue
if nphotons==0:
break
end_prop = time.time()
print "propagation time: ",end_prop-start_prop," secs"
end_flags = self.flags.get()
end_flag = np.max(end_flags)
if end_flag & (1 << 31):
print >>sys.stderr, "WARNING: ABORTED PHOTONS"
if api.is_gpu_api_cuda():
cuda.Context.get_current().synchronize()
elif api.is_gpu_api_opencl():
cl.enqueue_barrier( comqueue )
@profile_if_possible
def select(self, target_flag, nthreads_per_block=64, max_blocks=1024,
start_photon=None, nphotons=None):
'''Return a new GPUPhoton object containing only photons that
have a particular bit set in their history word.'''
cuda.Context.get_current().synchronize()
index_counter_gpu = ga.zeros(shape=1, dtype=np.uint32)
cuda.Context.get_current().synchronize()
if start_photon is None:
start_photon = 0
if nphotons is None:
nphotons = self.pos.size - start_photon
# First count how much space we need
for first_photon, photons_this_round, blocks in \
chunk_iterator(nphotons, nthreads_per_block, max_blocks):
self.gpu_funcs.count_photons(np.int32(start_photon+first_photon),
np.int32(photons_this_round),
np.uint32(target_flag),
index_counter_gpu, self.flags,
block=(nthreads_per_block,1,1),
grid=(blocks, 1))
cuda.Context.get_current().synchronize()
reduced_nphotons = int(index_counter_gpu.get()[0])
# Then allocate new storage space
pos = ga.empty(shape=reduced_nphotons, dtype=ga.vec.float3)
dir = ga.empty(shape=reduced_nphotons, dtype=ga.vec.float3)
pol = ga.empty(shape=reduced_nphotons, dtype=ga.vec.float3)
wavelengths = ga.empty(shape=reduced_nphotons, dtype=np.float32)
t = ga.empty(shape=reduced_nphotons, dtype=np.float32)
last_hit_triangles = ga.empty(shape=reduced_nphotons, dtype=np.int32)
flags = ga.empty(shape=reduced_nphotons, dtype=np.uint32)
weights = ga.empty(shape=reduced_nphotons, dtype=np.float32)
# And finaly copy photons, if there are any
if reduced_nphotons > 0:
index_counter_gpu.fill(0)
for first_photon, photons_this_round, blocks in \
chunk_iterator(nphotons, nthreads_per_block, max_blocks):
self.gpu_funcs.copy_photons(np.int32(start_photon+first_photon),
np.int32(photons_this_round),
np.uint32(target_flag),
index_counter_gpu,
self.pos, self.dir, self.wavelengths, self.pol, self.t, self.flags, self.last_hit_triangles, self.weights,
pos, dir, wavelengths, pol, t, flags, last_hit_triangles, weights,
block=(nthreads_per_block,1,1),
grid=(blocks, 1))
assert index_counter_gpu.get()[0] == reduced_nphotons
return GPUPhotonsSlice(pos, dir, pol, wavelengths, t, last_hit_triangles, flags, weights)
def __del__(self):
del self.pos
del self.dir
del self.pol
del self.wavelengths
del self.t
del self.flags
del self.last_hit_triangles
# Free up GPU memory quickly if now available
gc.collect()
    def __len__(self):
        """Return the number of photon slots held in GPU storage."""
        return self.pos.size
class GPUPhotonsSlice(GPUPhotons):
    '''A `slice`-like view over a subrange of another GPU photons array.

    Behaves exactly like a GPUPhotons instance, but its GPU storage is
    borrowed from another GPUPhotons object.
    Returned by the GPUPhotons.iterate_copies() iterator.'''
    def __init__(self, pos, dir, pol, wavelengths, t, last_hit_triangles, flags, weights):
        '''Wrap existing GPUArray slices taken from an instance of
        GPUPhotons. NOTE THESE ARE NOT CPU ARRAYS!'''
        self.pos, self.dir, self.pol = pos, dir, pol
        self.wavelengths, self.t = wavelengths, t
        self.last_hit_triangles = last_hit_triangles
        self.flags, self.weights = flags, weights
        self.gpu_funcs = GPUFuncs(get_cu_module('propagate.cu', options=cuda_options))
        self.true_nphotons = len(pos)
        self.ncopies = 1
    def __del__(self):
        # Intentionally empty: the parent GPUPhotons owns the GPU memory.
        pass
|
"""This module finds similar songs based on common adjectives.
Note: This module is based on the module lyrics_topics but is not included
in or combined with lyrics_topics to avoid confusion and to allow the
possibility of working with only one method to find similar lyrics since
they do not lead to equally good or bad results.
Functions:
The following functions can be used without an XML tree:
get_adjectives(string) -> list
adjectives_sorted(string) -> Counter
find_repeated_adjectives(string) -> list
get_duplicates(list) -> list
The following functions can only be used with an XML tree:
find_similar_songs(xml.etree.ElementTree.Element,
xml.etree.ElementTree.Element) -> list
query_get_song_recommendation(string, string, xml.etree.ElementTree.Element)
-> string
"""
from collections import Counter
import spacy
import song_information
nlp = spacy.load(("en_core_web_sm"))
def get_adjectives(lyrics):
    """Extract the lemma of every adjective occurring in the lyrics.

    Args:
        lyrics: A string containing the lyrics of a song.

    Returns:
        A list with one entry (the lemma) per adjective token found.
    """
    tokens = nlp(lyrics.lower())
    return [tok.lemma_ for tok in tokens if tok.pos_ == "ADJ"]
def adjectives_sorted(lyrics):
    """Count how often each adjective appears in the lyrics.

    Args:
        lyrics: A string containing the lyrics of a song.

    Returns:
        A Counter mapping each adjective found in the lyrics to its
        frequency.
    """
    return Counter(get_adjectives(lyrics))
def find_repeated_adjectives(lyrics):
    """List the adjectives occurring more than once in the lyrics.

    Args:
        lyrics: A string containing the lyrics of a song.

    Returns:
        A list of all adjectives found more than once, ordered from most
        to least common.
    """
    counts = adjectives_sorted(lyrics)
    repeated = []
    for adjective, frequency in counts.most_common():
        if frequency > 1:
            repeated.append(adjective)
    return repeated
def get_duplicates(song_list):
    """Find all entries occurring more than once in a list.

    Args:
        song_list: A list of "song by artist" strings with which this
            function is called from find_similar_songs.

    Returns:
        A list of all strings found more than once in the input list,
        in order of first appearance.
    """
    # Build the Counter once instead of re-counting the whole list for
    # every key (the original recomputed Counter(song_list) inside the
    # comprehension, which was accidentally O(n**2)).
    counts = Counter(song_list)
    return [key for key, count in counts.items() if count > 1]
def find_similar_songs(song, root):
    """Find all songs in an XML corpus that resemble a given song.

    Args:
        song: A child of an ElementTree.
        root: The root of the ElementTree which has the child song.

    Returns:
        A list of all songs that have at least two adjectives in common
        with the passed song.
    """
    own_adjectives = find_repeated_adjectives(song_information.get_lyrics(song))
    matches = []
    for candidate in root:
        if candidate == song:
            continue
        candidate_lyrics = song_information.get_lyrics(candidate)
        shared = [adj for adj in find_repeated_adjectives(candidate_lyrics)
                  if adj in own_adjectives]
        if shared:
            label = ("'" + song_information.get_songtitle(candidate)
                     + "' by " + song_information.get_artist(candidate))
            # One entry per shared adjective, as before: a song must share
            # at least two adjectives to survive get_duplicates below.
            matches.extend([label] * len(shared))
    return get_duplicates(matches)
def query_get_song_recommendation(songtitle, artist, root):
    """Try to recommend similar songs to the requested song.

    Args:
        songtitle: A string containing a song name.
        artist: A string containing the artist of the song.
        root: The root of the ElementTree.

    Returns:
        A string message including which similar song(s) to the requested
        song the inquirer might like, or an apology if either the song
        could not be found in the corpus or no similar song exists.
    """
    # Bug fix: the original set the "not found" answer on every
    # non-matching child (so it was overwritten even when the song existed
    # later in the corpus) and crashed with a NameError on `song` whenever
    # the requested song was not in the corpus at all.
    song = None
    for child in root:
        if (song_information.get_songtitle(child) == songtitle
                and song_information.get_artist(child) == artist):
            song = child
            break
    if song is None:
        # Also fixes the missing space after the artist name.
        return ("Sorry, '" + songtitle + "' by " + artist
                + " could not be found in this corpus")
    similar_songs = find_similar_songs(song, root)
    if len(similar_songs) > 0:
        answer = ("If you like '" + songtitle + "' by " + artist
                  + ", you might like " + ", ".join(similar_songs))
    else:
        answer = ("Sorry, there is no similar song to '" + songtitle + "' by "
                  + artist + " in this corpus")
    return answer
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Tests for Qt Linguist storage class"""
from lxml import etree
from translate.misc.multistring import multistring
from translate.storage import ts2 as ts
from translate.storage import test_base
from translate.storage.placeables import parse
from translate.storage.placeables import xliff
from translate.storage.placeables.lisa import xml_to_strelem
# Collect the parse functions of every placeable class exported by the
# xliff placeables module, except the XLIFFPlaceable base class itself.
# Bug fix: `attrname not in ('XLIFFPlaceable')` was a *substring* test
# against the string 'XLIFFPlaceable' (the parentheses do not make a
# one-element tuple), so any name contained in it (e.g. 'XLIFF') was
# wrongly excluded as well.
xliffparsers = []
for attrname in dir(xliff):
    attr = getattr(xliff, attrname)
    if type(attr) is type and \
       attrname != 'XLIFFPlaceable' and \
       hasattr(attr, 'parse') and \
       attr.parse is not None:
        xliffparsers.append(attr.parse)
def rich_parse(s):
    # Parse string `s` into a rich-string element tree, trying every
    # placeable parser collected in `xliffparsers`.
    return parse(s, xliffparsers)
class TestTSUnit(test_base.TestTranslationUnit):
    # Run the generic translation-unit test suite against the ts2 unit class.
    UnitClass = ts.tsunit
class TestTSfile(test_base.TestTranslationStore):
    # Runs the generic translation-store test suite against ts2, plus the
    # Qt-Linguist-specific checks below.
    StoreClass = ts.tsfile
    def test_basic(self):
        """A unit can be added, serialized and parsed back."""
        tsfile = ts.tsfile()
        assert tsfile.units == []
        tsfile.addsourceunit("Bla")
        assert len(tsfile.units) == 1
        newfile = ts.tsfile.parsestring(str(tsfile))
        print str(tsfile)
        assert len(newfile.units) == 1
        assert newfile.units[0].source == "Bla"
        assert newfile.findunit("Bla").source == "Bla"
        assert newfile.findunit("dit") is None
    def test_source(self):
        """Changing a unit's source is reflected after a round trip."""
        tsfile = ts.tsfile()
        tsunit = tsfile.addsourceunit("Concept")
        tsunit.source = "Term"
        newfile = ts.tsfile.parsestring(str(tsfile))
        print str(tsfile)
        assert newfile.findunit("Concept") is None
        assert newfile.findunit("Term") is not None
    def test_target(self):
        """Setting a unit's target survives a round trip."""
        tsfile = ts.tsfile()
        tsunit = tsfile.addsourceunit("Concept")
        tsunit.target = "Konsep"
        newfile = ts.tsfile.parsestring(str(tsfile))
        print str(tsfile)
        assert newfile.findunit("Concept").target == "Konsep"
    def test_plurals(self):
        """Test basic plurals"""
        tsfile = ts.tsfile()
        tsunit = tsfile.addsourceunit("File(s)")
        tsunit.target = [u"Leêr", u"Leêrs"]
        newfile = ts.tsfile.parsestring(str(tsfile))
        print str(tsfile)
        checkunit = newfile.findunit("File(s)")
        assert checkunit.target == [u"Leêr", u"Leêrs"]
        assert checkunit.hasplural()
    def test_language(self):
        """Check that we can get and set language and sourcelanguage
        in the header"""
        tsstr = '''<!DOCTYPE TS>
<TS version="2.0" language="fr" sourcelanguage="de">
</TS>
'''
        tsfile = ts.tsfile.parsestring(tsstr)
        assert tsfile.gettargetlanguage() == 'fr'
        assert tsfile.getsourcelanguage() == 'de'
        tsfile.settargetlanguage('pt_BR')
        assert 'pt_BR' in str(tsfile)
        assert tsfile.gettargetlanguage() == 'pt-br'
        # We convert en_US to en
        tsstr = '''<!DOCTYPE TS>
<TS version="2.0" language="fr" sourcelanguage="en_US">
</TS>
'''
        tsfile = ts.tsfile.parsestring(tsstr)
        assert tsfile.getsourcelanguage() == 'en'
    def test_locations(self):
        """test that locations work well"""
        tsstr = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
    <name>MainWindow</name>
    <message>
        <location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
        <source>Desktop Settings (Default)</source>
        <translation>Asztali beállítások (Alapértelmezett)</translation>
    </message>
    <message>
        <location line="+5"/>
        <source>Choose style and palette based on your desktop settings.</source>
        <translation>Stílus és paletta alapú kiválasztása az asztali beállításokban.</translation>
    </message>
</context>
</TS>
'''
        tsfile = ts.tsfile.parsestring(tsstr)
        assert len(tsfile.units) == 2
        assert tsfile.units[0].getlocations() == ['../tools/qtconfig/mainwindow.cpp:+202']
        assert tsfile.units[1].getlocations() == ['+5']
    def test_merge_with_fuzzies(self):
        """test that merge with fuzzy works well"""
        tsstr1 = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
    <name>MainWindow</name>
    <message>
        <location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
        <source>Desktop Settings (Default)</source>
        <translation type="unfinished">Asztali beállítások (Alapértelmezett)</translation>
    </message>
    <message>
        <location line="+5"/>
        <source>Choose style and palette based on your desktop settings.</source>
        <translation>Stílus és paletta alapú kiválasztása az asztali beállításokban.</translation>
    </message>
</context>
</TS>
'''
        tsstr2 = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
    <name>MainWindow</name>
    <message>
        <location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
        <source>Desktop Settings (Default)</source>
        <translation type="unfinished"/>
    </message>
    <message>
        <location line="+5"/>
        <source>Choose style and palette based on your desktop settings.</source>
        <translation type="unfinished"/>
    </message>
</context>
</TS>
'''
        tsfile = ts.tsfile.parsestring(tsstr1)
        tsfile2 = ts.tsfile.parsestring(tsstr2)
        assert len(tsfile.units) == 2
        assert len(tsfile2.units) == 2
        tsfile2.units[0].merge(tsfile.units[0]) #fuzzy
        tsfile2.units[1].merge(tsfile.units[1]) #not fuzzy
        assert tsfile2.units[0].isfuzzy() == True
        assert tsfile2.units[1].isfuzzy() == False
|
# Generated by Django 2.1.7 on 2019-07-30 14:40
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='subjectinfo',
            name='subject_id',
            # Bug fix: the default must be the uuid.uuid4 *callable*, not a
            # UUID instance baked in at migration-generation time; otherwise
            # every new row would share the same primary-key default and
            # inserts after the first would collide.
            field=models.CharField(default=uuid.uuid4, editable=False, max_length=50, primary_key=True, serialize=False, verbose_name='科目ID'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='user_id',
            field=models.CharField(default=uuid.uuid4, editable=False, max_length=50, primary_key=True, serialize=False, verbose_name='用户ID'),
        ),
    ]
|
import requests
import base64
# Address of the detection server to post images to.
serverip = "172.20.10.3"
port = "8080"
router = "detectionMinic"
# Full endpoint, e.g. http://172.20.10.3:8080/detectionMinic
url = "http://" + serverip + ":" + port + "/" + router
def request(imgFile):
    """Send an image to the detection endpoint as base64-encoded form data
    and print the server's response."""
    with open(imgFile, 'rb') as handle:
        raw = handle.read()
    encoded = base64.b64encode(raw)
    response = requests.post(url, {'img': encoded})
    print(response.text)
def request2():
    """Post a dummy text payload to the detection endpoint (smoke test)."""
    payload = {'img': "hello world", "size": "128"}
    response = requests.post(url, payload)
    print(response.text)
def request3(imgFile):
    """Post the raw (non-encoded) image bytes to the detection endpoint
    and print the server's response."""
    with open(imgFile, 'rb') as handle:
        raw = handle.read()
    response = requests.post(url, {'img': raw})
    print(response.text)
if __name__ == '__main__':
    # Manual smoke test: upload a local image to the detection service.
    request("test.jpg")
    # request2()
    # request3('test.jpg')
    # main()
    # imgFile = '../test.jpg'
    # img = cv2.imread(imgFile)
    # img2 = cv2.resize(img, (10,10))
    # cv2.imwrite('../test5.jpg', img2)
|
import os
import matplotlib.pyplot as plt
import pandas as pd
from lstchain.io.io import dl2_params_lstcam_key
from lstchain.visualization import plot_dl2
def test_plot_disp(simulated_dl2_file):
    """Smoke test: plot_disp runs on a simulated DL2 dataframe."""
    dataframe = pd.read_hdf(simulated_dl2_file, key=dl2_params_lstcam_key)
    plot_dl2.plot_disp(dataframe)
def test_direction_results(tmp_path, simulated_dl2_file):
    """Smoke test: direction_results produces its output files."""
    dataframe = pd.read_hdf(simulated_dl2_file, key=dl2_params_lstcam_key)
    # Strings are required as input for the output files not PosixPath
    points_file = os.path.join(tmp_path, 'dir.h5')
    plot_file = os.path.join(tmp_path, 'dir.png')
    plot_dl2.direction_results(dataframe,
                               points_outfile=points_file,
                               plot_outfile=plot_file)
def test_energy_results(tmp_path, simulated_dl2_file):
    """Smoke test: energy_results produces its output files."""
    dataframe = pd.read_hdf(simulated_dl2_file, key=dl2_params_lstcam_key)
    # Strings are required as input for the output files not PosixPath
    points_file = os.path.join(tmp_path, 'ene.h5')
    plot_file = os.path.join(tmp_path, 'ene.png')
    plot_dl2.energy_results(dataframe,
                            points_outfile=points_file,
                            plot_outfile=plot_file)
def test_plot_models_features_importances(rf_models):
    """Smoke test: feature-importance plotting onto a 2x2 axes grid."""
    _, axes = plt.subplots(2, 2, figsize=(15, 10))
    plot_dl2.plot_models_features_importances(
        rf_models["path"], axes=axes, alpha=0.5, fill=False)
|
from dbnd._core.tracking.schemas.base import ApiStrictSchema
from dbnd._vendor.marshmallow import fields
class LogMessageSchema(ApiStrictSchema):
    """Marshmallow schema for a tracked log-message record."""
    # Origin of the log message; optional.
    source = fields.String(allow_none=True)
    # Stack trace text attached to the message; optional.
    stack_trace = fields.String(allow_none=True)
    # When the message was produced; optional.
    timestamp = fields.DateTime(allow_none=True)
    # dbnd version that emitted the message; defaults to None when absent
    # from the payload.
    dbnd_version = fields.String(allow_none=True, missing=None)
|
# This file is adapted from the perturbseq library by Thomas Norman
# https://github.com/thomasmaxwellnorman/perturbseq_demo/blob/master/perturbseq/cell_cycle.py
import pandas as pd
import numpy as np
from collections import OrderedDict
from scipy.sparse import issparse
from ..tools.utils import einsum_correlation, log1p_
def group_corr(adata, layer, gene_list):
    """Measure the correlation of each gene in a list with the average
    expression of all genes in that list (used for cell cycle position
    calling).

    Arguments
    ---------
        adata: an anndata object.
        layer: `str` or None (default: `None`)
            The layer of data to use for calculating correlation. If None, use adata.X.
        gene_list: list of gene names

    Returns
    ---------
        (valid_gene_list, corr): A tuple of the gene names that exist in
        `adata` and the correlation coefficient of each such gene with the
        mean expression of all of them.
    """
    # returns list of correlations of each gene within a list of genes with the total expression of the group
    intersect_genes = adata.var_names.intersection(gene_list)
    if len(intersect_genes) == 0:
        raise Exception(f"your adata doesn't have any gene from the gene_list {gene_list}.")
    if layer is None:
        expression_matrix = adata[:, intersect_genes].X
    else:
        expression_matrix = adata[:, intersect_genes].layers[layer]
    # NOTE(review): log1p_ presumably log-transforms only when appropriate
    # for this layer -- confirm against ..tools.utils.log1p_.
    expression_matrix = log1p_(adata, expression_matrix)
    avg_exp = expression_matrix.mean(axis=1)
    # Sparse matrices are densified (.A / .A1) before the einsum-based
    # row-wise correlation; dense arrays are used directly.
    cor = einsum_correlation(np.array(expression_matrix.A.T, dtype='float'), np.array(avg_exp.A1, dtype='float')) if issparse(expression_matrix) \
        else einsum_correlation(np.array(expression_matrix.T, dtype='float'), np.array(avg_exp, dtype='float'))
    return np.array(intersect_genes), cor.flatten()
def refine_gene_list(adata, layer, gene_list, threshold, return_corrs=False):
    """Refine a list of genes by removing those that don't correlate well
    with the average expression of those genes.

    Parameters
    ----------
        adata: an anndata object.
        layer: `str` or None (default: `None`)
            The layer of data to use for calculating correlation. If None, use adata.X.
        gene_list: list of gene names
        threshold: threshold on correlation coefficient used to discard genes (expression of each gene is
            compared to the bulk expression of the group and any gene with a correlation coefficient less
            than this is discarded)
        return_corrs: whether to return the correlations along with the gene names (default: False)

    Returns
    -------
        Refined list of genes that are well correlated with the average
        expression trend.
        NOTE(review): despite the `return_corrs` docstring, when
        return_corrs=True this returns *only* the filtered correlation
        values, not the gene names -- confirm whether callers expect
        (genes, corrs) here.
    """
    gene_list, corrs = group_corr(adata, layer, gene_list)
    if (return_corrs):
        return corrs[corrs >= threshold]
    else:
        return gene_list[corrs >= threshold]
def group_score(adata, layer, gene_list):
    """Score cells within the population for expression of a set of genes.
    Raw expression data are first log transformed, then the values are
    summed, and then scores are Z-normalized across all cells.

    Arguments
    ---------
        adata: an anndata object.
        layer: `str` or None (default: `None`)
            The layer of data to use for calculating correlation. If None, use adata.X.
        gene_list: list of gene names

    Returns
    -------
        Z-scored expression data
    """
    intersect_genes = adata.var_names.intersection(gene_list)
    if len(intersect_genes) == 0:
        raise Exception(f"your adata doesn't have any gene from the gene_list {gene_list}.")
    if layer is None:
        expression_matrix = adata[:, intersect_genes].X
    else:
        expression_matrix = adata[:, intersect_genes].layers[layer]
    expression_matrix = log1p_(adata, expression_matrix)
    if layer is None or layer.startswith('X_'):
        scores = expression_matrix.sum(1).A1 if issparse(expression_matrix) \
            else expression_matrix.sum(1)
    else:
        # NOTE(review): this branch applies np.log(x + 1) on top of the
        # log1p_ call above -- looks like a potential double log transform
        # for non-'X_' layers; confirm log1p_'s per-layer behavior.
        if issparse(expression_matrix):
            expression_matrix.data = np.log(expression_matrix.data + 1)
            scores = expression_matrix.sum(1).A1
        else:
            scores = np.log(expression_matrix + 1).sum(1)
    # Z-normalize the summed scores across all cells.
    scores = (scores - scores.mean())/scores.std()
    return scores
def batch_group_score(adata, layer, gene_lists):
    """Score cells for the expression of several gene sets.

    Raw expression data are first log transformed, then summed, and the
    resulting scores are Z-normalized across all cells (see group_score).

    Arguments
    ---------
        adata: an anndata object.
        layer: `str` or None (default: `None`)
            The layer of data to use for calculating correlation. If None, use adata.X.
        gene_lists: a mapping from score name to a list of gene names

    Returns
    -------
        an OrderedDict mapping each score name to its per-cell scores.
    """
    batch_scores = OrderedDict()
    for name, genes in gene_lists.items():
        batch_scores[name] = group_score(adata, layer, genes)
    return batch_scores
def get_cell_phase_genes(adata, layer, refine=True, threshold=0.3):
    """Return cell-cycle-regulated marker genes, filtered for coherence.

    Arguments
    ---------
        adata: an anndata object.
        layer: `str` or None (default: `None`)
            The layer of data to use for calculating correlation. If None, use adata.X.
        refine: `bool` (default: `True`)
            whether to refine the gene lists based on how consistent the expression is among
            the groups
        threshold: `float` or None (default: `0.3`)
            threshold on correlation coefficient used to discard genes (expression of each
            gene is compared to the bulk expression of the group and any gene with a correlation
            coefficient less than this is discarded)

    Returns
    -------
        an OrderedDict mapping each cell cycle phase ('G1-S', 'S', 'G2-M',
        'M', 'M-G1') to marker genes that show strong co-expression
    """
    # Curated marker-gene panels per cell cycle phase.
    cell_phase_genes = OrderedDict()
    cell_phase_genes['G1-S'] = pd.Series(['ARGLU1', 'BRD7', 'CDC6', 'CLSPN', 'ESD', 'GINS2',
                                          'GMNN', 'LUC7L3', 'MCM5', 'MCM6', 'NASP', 'PCNA',
                                          'PNN', 'SLBP', 'SRSF7', 'SSR3', 'ZRANB2'])
    cell_phase_genes['S'] = pd.Series(['ASF1B', 'CALM2', 'CDC45', 'CDCA5', 'CENPM', 'DHFR',
                                       'EZH2', 'FEN1', 'HIST1H2AC', 'HIST1H4C', 'NEAT1',
                                       'PKMYT1', 'PRIM1', 'RFC2', 'RPA2', 'RRM2', 'RSRC2',
                                       'SRSF5', 'SVIP', 'TOP2A', 'TYMS', 'UBE2T', 'ZWINT'])
    cell_phase_genes['G2-M'] = pd.Series(['AURKB', 'BUB3', 'CCNA2', 'CCNF', 'CDCA2', 'CDCA3',
                                          'CDCA8', 'CDK1', 'CKAP2', 'DCAF7', 'HMGB2', 'HN1',
                                          'KIF5B', 'KIF20B', 'KIF22', 'KIF23', 'KIFC1', 'KPNA2',
                                          'LBR', 'MAD2L1', 'MALAT1', 'MND1', 'NDC80', 'NUCKS1',
                                          'NUSAP1', 'PIF1', 'PSMD11', 'PSRC1', 'SMC4', 'TIMP1',
                                          'TMEM99', 'TOP2A', 'TUBB', 'TUBB4B', 'VPS25'])
    cell_phase_genes['M'] = pd.Series(['ANP32B', 'ANP32E', 'ARL6IP1', 'AURKA', 'BIRC5', 'BUB1',
                                       'CCNA2', 'CCNB2', 'CDC20', 'CDC27', 'CDC42EP1', 'CDCA3',
                                       'CENPA', 'CENPE', 'CENPF', 'CKAP2', 'CKAP5', 'CKS1B',
                                       'CKS2', 'DEPDC1', 'DLGAP5', 'DNAJA1', 'DNAJB1', 'GRK6',
                                       'GTSE1', 'HMG20B', 'HMGB3', 'HMMR', 'HN1', 'HSPA8',
                                       'KIF2C', 'KIF5B', 'KIF20B', 'LBR', 'MKI67', 'MZT1',
                                       'NUF2', 'NUSAP1', 'PBK', 'PLK1', 'PRR11', 'PSMG3', 'PWP1',
                                       'RAD51C', 'RBM8A', 'RNF126', 'RNPS1', 'RRP1', 'SFPQ',
                                       'SGOL2', 'SMARCB1', 'SRSF3', 'TACC3', 'THRAP3', 'TPX2',
                                       'TUBB4B', 'UBE2D3', 'USP16', 'WIBG', 'YWHAH', 'ZNF207'])
    cell_phase_genes['M-G1'] = pd.Series(['AMD1', 'ANP32E', 'CBX3', 'CDC42', 'CNIH4', 'CWC15',
                                          'DKC1', 'DNAJB6', 'DYNLL1', 'EIF4E', 'FXR1', 'GRPEL1',
                                          'GSPT1', 'HMG20B', 'HSPA8', 'ILF2', 'KIF5B', 'KPNB1',
                                          'LARP1', 'LYAR', 'MORF4L2', 'MRPL19', 'MRPS2', 'MRPS18B',
                                          'NUCKS1', 'PRC1', 'PTMS', 'PTTG1', 'RAN', 'RHEB', 'RPL13A',
                                          'SRSF3', 'SYNCRIP', 'TAF9', 'TMEM138', 'TOP1', 'TROAP',
                                          'UBE2D3', 'ZNF593'])
    if (refine):
        # Keep only markers that correlate with their panel's mean expression.
        for phase in cell_phase_genes:
            # Panels are upper-case symbols; capitalize when the dataset's
            # gene names are not upper-case (presumably mouse-style symbols
            # -- TODO confirm).
            cur_cell_phase_genes = cell_phase_genes[phase] if adata.var_names[0].isupper() \
                else [i.capitalize() for i in cell_phase_genes[phase]]
            cell_phase_genes[phase] = refine_gene_list(adata, layer, cur_cell_phase_genes, threshold)
    return cell_phase_genes
def get_cell_phase(adata, layer=None, gene_list=None, refine=True, threshold=0.3):
    """Compute cell cycle phase scores for cells in the population.

    Arguments
    ---------
        adata: an anndata object.
        layer: `str` or None (default: `None`)
            The layer of data to use for calculating correlation. If None, use adata.X.
        gene_list: `OrderedDict` or None (default: `None`)
            OrderedDict of marker genes to use for cell cycle phases. If None, the default
            list will be used.
        refine: `bool` (default: `True`)
            whether to refine the gene lists based on how consistent the expression is among
            the groups
        threshold: `float` or None (default: `0.3`)
            threshold on correlation coefficient used to discard genes (expression of each
            gene is compared to the bulk expression of the group and any gene with a correlation
            coefficient less than this is discarded)

    Returns
    -------
        Cell cycle scores indicating the likelihood a given cell is in a given
        cell cycle phase, plus the called phase, a within-phase progress value
        and a within-phase rank per cell.
    """
    # get list of genes if one is not provided
    if gene_list is None:
        cell_phase_genes = get_cell_phase_genes(adata, layer, refine=refine, threshold=threshold)
    else:
        cell_phase_genes = gene_list
    # score each cell cycle phase and Z-normalize
    phase_scores = pd.DataFrame(batch_group_score(adata, layer, cell_phase_genes))
    normalized_phase_scores = phase_scores.sub(phase_scores.mean(axis=1), axis=0).div(phase_scores.std(axis=1), axis=0)
    normalized_phase_scores_corr = normalized_phase_scores.transpose()
    # Append one indicator column per phase; a cell's final score for a phase
    # is the correlation of its score profile with that indicator vector.
    normalized_phase_scores_corr['G1-S'] = [1, 0, 0, 0, 0]
    normalized_phase_scores_corr['S'] = [0, 1, 0, 0, 0]
    normalized_phase_scores_corr['G2-M'] = [0, 0, 1, 0, 0]
    normalized_phase_scores_corr['M'] = [0, 0, 0, 1, 0]
    normalized_phase_scores_corr['M-G1'] = [0, 0, 0, 0, 1]
    phase_list = ['G1-S', 'S', 'G2-M', 'M', 'M-G1']
    # final scores for each phase are correlation of expression profile with vectors defined above
    cell_cycle_scores = normalized_phase_scores_corr.corr()[-len(phase_list):].transpose()[:-len(phase_list)]
    # pick maximal score as the phase for that cell
    cell_cycle_scores['cell_cycle_phase'] = cell_cycle_scores.idxmax(axis=1)
    cell_cycle_scores['cell_cycle_phase'] = cell_cycle_scores['cell_cycle_phase'].astype('category')
    # NOTE(review): Categorical.set_categories(..., inplace=True) is
    # deprecated/removed in recent pandas -- confirm the pinned pandas version.
    cell_cycle_scores['cell_cycle_phase'].cat.set_categories(phase_list, inplace=True)
    def progress_ratio(x, phase_list):
        # Score difference between the previous and the next phase (cyclic).
        ind = phase_list.index(x['cell_cycle_phase'])
        return x[phase_list[(ind - 1) % len(phase_list)]] - x[phase_list[(ind + 1) % len(phase_list)]]
    # interpolate position within given cell cycle phase
    cell_cycle_scores['cell_cycle_progress'] = cell_cycle_scores.apply(lambda x: progress_ratio(x, list(phase_list)),
                                                                       axis=1)
    cell_cycle_scores.sort_values(['cell_cycle_phase', 'cell_cycle_progress'],
                                  ascending=[True, False],
                                  inplace=True)
    # order of cell within cell cycle phase
    cell_cycle_scores['cell_cycle_order'] = cell_cycle_scores.groupby('cell_cycle_phase').cumcount()
    cell_cycle_scores['cell_cycle_order'] = cell_cycle_scores.groupby('cell_cycle_phase')['cell_cycle_order'].apply(
        lambda x: x / (len(x) - 1))
    return cell_cycle_scores
def cell_cycle_scores(adata, layer=None, gene_list=None, refine=True, threshold=0.3):
    """Call cell cycle positions for cells within the population. If more direct control is desired,
    use get_cell_phase.

    Arguments
    ---------
        adata: an anndata object.
        layer: `str` or None (default: `None`)
            The layer of data to use for calculating correlation. If None, use adata.X.
        gene_list: OrderedDict of marker genes to use for cell cycle phases. If None, the default
            list will be used.
        refine: `bool` (default: `True`)
            whether to refine the gene lists based on how consistent the expression is among
            the groups
        threshold: `float` or None (default: `0.3`)
            threshold on correlation coefficient used to discard genes (expression of each
            gene is compared to the bulk expression of the group and any gene with a correlation
            coefficient less than this is discarded)

    Returns
    -------
        Returns an updated adata object with cell_cycle_phase as new column in .obs and a new data
        frame with `cell_cycle_scores` key to .obsm where the cell cycle scores indicating the likelihood a
        given cell is in a given cell cycle phase.
    """
    cell_cycle_scores = get_cell_phase(adata, layer=layer, refine=refine, gene_list=gene_list, threshold=threshold)
    # get_cell_phase returns rows indexed by integer positions; map them back
    # to the corresponding obs_names (cell identifiers).
    cell_cycle_scores.index = adata.obs_names[cell_cycle_scores.index.values.astype('int')]
    adata.obs['cell_cycle_phase'] = cell_cycle_scores['cell_cycle_phase'].astype('category')
    # adata.obsm['cell_cycle_scores'] = cell_cycle_scores.set_index(adata.obs_names)
    adata.obsm['cell_cycle_scores'] = cell_cycle_scores.loc[adata.obs_names, :] #.values
|
from os import environ
# if you set a property in SESSION_CONFIG_DEFAULTS, it will be inherited by all configs
# in SESSION_CONFIGS, except those that explicitly override it.
# the session config can be accessed from methods in your apps as self.session.config,
# e.g. self.session.config['participation_fee']
# Defaults inherited by every entry in SESSION_CONFIGS unless overridden.
SESSION_CONFIG_DEFAULTS = {
    'real_world_currency_per_point': 1.00,
    'participation_fee': 0.00,
    'doc': "",
}
SESSION_CONFIGS = [
    {
        'name': 'qv',
        'display_name': "Quadratic Voting",
        'num_demo_participants': 1,
        'app_sequence': ['qv'],
        'Survey_Title': 'Survey'
    },
]
# ISO-639 code
# for example: de, fr, ja, ko, zh-hans
LANGUAGE_CODE = 'en'
# e.g. EUR, GBP, CNY, JPY
REAL_WORLD_CURRENCY_CODE = 'USD'
USE_POINTS = True
ROOMS = []
ADMIN_USERNAME = 'admin'
# for security, best to set admin password in an environment variable
ADMIN_PASSWORD = environ.get('OTREE_ADMIN_PASSWORD')
DEMO_PAGE_INTRO_HTML = """ """
# NOTE(review): hard-coded secret key committed to source control -- rotate
# it and load it from an environment variable for any non-demo deployment.
SECRET_KEY = 'aivcd7#1k#_z(pb7baw5tx^+4w=xtbh(hb-t-&3-xfxe^vixzx'
# if an app is included in SESSION_CONFIGS, you don't need to list it here
INSTALLED_APPS = ['otree']
|
import tensorflow as tf
# This part can be uncommented if GPU is available in computer system.
#tf.test.is_gpu_available
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
### Modified on Feb 10 2021 due to tensorflow update
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.metrics import r2_score
from sklearn import preprocessing
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
import math
from sklearn.preprocessing import MinMaxScaler
from time import time
np.set_printoptions(suppress=True)
def dl2(param_size, num_prop, file_name):
    """Train a small fully-connected regression network on a CSV dataset.

    Parameters
    ----------
    param_size : int
        Number of leading columns in `file_name` used as input features.
    num_prop : int
        Number of target (property) columns following the features.
    file_name : str
        CSV file containing `param_size` feature columns followed by
        `num_prop` target columns.

    Returns
    -------
    float
        Mean absolute error of the trained model on the held-out test
        split.  Side effects: appends the score to 'accuracy.dat' and
        saves the model to 'model.json' / 'model.h5'.
    """
    # NOTE(review): these two files are read but never used below; kept so a
    # missing file still raises as before -- confirm whether they can go.
    df_gold = pd.read_csv("dl-gold.csv")
    df_gold = df_gold.iloc[:, 0:num_prop]
    df_weight = pd.read_csv("dl-weight.csv")
    df_weight = df_weight.iloc[:, 0:num_prop]
    data = pd.read_csv(file_name)
    # Split the feature and target columns.
    x = np.asarray(data.loc[:, data.columns[range(param_size)]]).astype(np.float32)
    y = np.asarray(data.loc[:, data.columns[range(param_size, num_prop + param_size)]]).astype(np.float32)
    x_train_ns, x_test_ns, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
    # Standardize the inputs only; targets are trained in their raw scale.
    # (The original scaled y as well but immediately discarded the scaled
    # values, so dropping that is behavior-preserving dead-code removal.)
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train_ns)
    x_test = scaler.transform(x_test_ns)
    tf.keras.backend.clear_session()
    # MODEL FOR REGRESSION PART
    def build_model():
        # Two hidden ReLU layers and a linear output, one unit per property.
        model = tf.keras.Sequential([
            layers.Dense(num_prop, activation=tf.nn.relu, input_shape=[param_size]),
            layers.Dense(num_prop, activation=tf.nn.relu),
            layers.Dense(num_prop, activation='linear')
        ])
        # `lr` is the legacy alias of `learning_rate`; kept for
        # compatibility with the tf.keras version this project pins.
        opt = tf.keras.optimizers.Adam(lr=0.001)
        model.compile(loss='mean_absolute_error', optimizer=opt, metrics=['mean_absolute_error', 'mean_squared_error'])
        return model
    model = build_model()
    # Early stopping on validation loss.
    from tensorflow.keras.callbacks import EarlyStopping
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)
    EPOCHS = 50000
    pbar = tqdm(total=EPOCHS)
    class PrintDot(keras.callbacks.Callback):
        # Advance the progress bar once per epoch.
        def on_epoch_end(self, epoch, logs):
            pbar.update()
    history = model.fit(
        x_train, y_train,
        epochs=EPOCHS,
        validation_data=(x_test, y_test),
        verbose=0,
        callbacks=[PrintDot(), es]
    )
    pbar.close()
    scores = model.evaluate(x_test, y_test, verbose=0)
    print("%s: %.2f" % (model.metrics_names[1], scores[1]))
    # Append the test MAE to the running accuracy log; the with-block fixes
    # the original unclosed file handle.
    with open("accuracy.dat", "a+") as acc_file:
        acc_file.write(str(scores[1]) + "\n")
    # Persist the architecture and the learned weights.
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights("model.h5")
    print("Saved model to disk")
    return scores[1]
|
# Use setuptools rather than distutils: distutils has been deprecated since
# Python 3.10 and removed from the standard library in 3.12.
from setuptools import setup

setup(name='pypp',
      version='0.0',
      description='post-processing eigendecompositions computed with paladin',
      url='https://github.com/michael-a-hansen/paladin',
      author='Mike Hansen',
      author_email='mike.hansen.utah@gmail.com',
      license='MIT',
      packages=['pypp'],
      install_requires=['numpy',
                        'scipy',
                        'matplotlib'],
      zip_safe=False)
|
#!/usr/bin/env python3
import numpy as np
class Car:
    """
    Kinematic model of a car-like robot with ref point on front axle
    States: x, y, yaw, v
    Inputs: a, delta
    """
    def __init__(self, x=0, y=0, yaw=0, v=0):
        """Initialize the state from the given pose and speed.

        Bug fix: the constructor previously ignored its arguments and
        zeroed every state variable.
        """
        self.x = x
        self.y = y
        self.yaw = yaw
        self.v = v
        self.a = 0              # current (clamped) acceleration command
        self.delta = 0          # current (clamped) steering command
        self.L = 3              # wheelbase, m
        self.a_max = 1          # acceleration limit, m/s^2
        self.delta_max = 1.22   # steering limit, rad
    def _update_controls(self, v, delta):
        """Clamp the control inputs and store them on the instance.

        Note: the first argument is the *acceleration* command (it is
        clamped by a_max and stored in self.a); the parameter name `v`
        is kept for backward compatibility.
        """
        self.a = np.fmin(np.fmax(v, -self.a_max), self.a_max)
        self.delta = np.fmin(np.fmax(delta, -self.delta_max), self.delta_max)
    def model(self, v, delta):
        """Return the state derivative [x_dot, y_dot, yaw_dot, v_dot] for
        the given acceleration (`v`) and steering (`delta`) commands."""
        self._update_controls(v, delta)
        state_dot = np.array([0., 0., 0., 0.])
        state_dot[0] = self.v * np.cos(self.yaw + self.delta)
        state_dot[1] = self.v * np.sin(self.yaw + self.delta)
        state_dot[2] = self.v * np.sin(self.delta) / self.L
        state_dot[3] = self.a
        return state_dot
    def step(self, a, delta, dt):
        """Advance the state by one explicit-Euler step of size dt."""
        state_dot = self.model(a, delta)
        state = self.get_state()
        self.set_state(state + state_dot * dt)
    def get_state(self):
        """Return the current state as a float array [x, y, yaw, v]."""
        return np.array([self.x, self.y, self.yaw, self.v], dtype=float)
    def set_state(self, state):
        """Set the state from an array-like [x, y, yaw, v]."""
        self.x = state[0]
        self.y = state[1]
        self.yaw = state[2]
        self.v = state[3]
|
import cv2
from matplotlib import pyplot as plt
from os import walk
import os
class LocalDescriptors:
    """Wrappers around OpenCV local feature descriptors."""
    def __init__(self):
        pass
    def orb_descriptor(self,file, nfeatures=500):
        """Compute ORB descriptors for an image file.

        Args:
            file: path of the image to load.
            nfeatures: maximum number of keypoints to detect.

        Returns:
            The ORB descriptor matrix, or an empty list when no
            descriptors were found.
            NOTE(review): mixed return types (array vs list) -- callers
            must handle both.
        """
        img = cv2.imread(file)
        # Initiate STAR detector
        orb = cv2.ORB_create(nfeatures=nfeatures)
        # find the keypoints with ORB
        kp = orb.detect(img, None)
        # compute the descriptors with ORB
        kp, des = orb.compute(img, kp)
        if des is None:
            return []
        return des
    def hog_descriptor(self,file,nfeatures=500):
        # Not implemented yet.
        pass
if __name__ == '__main__':
    # No command-line behavior; this module is import-only for now.
    pass
|
# -*- coding: utf-8 -*-
'文件发送方'
import socket
import threading
import header
import os
import sys
def transfer_file(sock, file_path, print_type):
    'Send the file at file_path over sock using the custom header protocol.'
    # Announce that we want to send a file.
    sock.send(header.SEND_FILE)
    # File object; opened once the receiver asks to start the transfer.
    fp = None
    while True:
        data = header.unpack_msg(sock.recv(1024))
        if data[0] == header.RECV_FILE:
            # Receiver is ready: send the file metadata first.
            file_size = os.path.getsize(file_path)
            file_name = os.path.split(file_path)[1]
            print u'INFO:文件名: {}, 文件大小: {}'.format(file_name, file_size)
            # Send file info: name, size and print type, joined by SPLIT.
            sock.send(header.FILE_INFO + file_name.encode('utf-8') + header.SPLIT \
                + str(file_size) + header.SPLIT + str(print_type))
        elif data[0] == header.START_TRANSFER:
            # Transfer starts: open the file for binary reading.
            try:
                fp = open(file_path, 'rb')
            except:
                raise BaseException(u'读取文件失败')
            # Send the first 1000-byte chunk.
            buffer = fp.read(1000)
            sock.send(header.DATA + buffer)
        elif data[0] == header.NEXT:
            # Receiver acknowledged the previous chunk; send the next one.
            buffer = fp.read(1000)
            if buffer != '':
                sock.send(header.DATA + buffer)
            else:
                # End of file reached: notify the receiver and stop.
                sock.send(header.STOP_TRANSFER)
                fp.close()
                break
        elif data[0] == header.NONE:
            raise BaseException(u'未知的 header')
if __name__ == '__main__':
    try:
        if len(sys.argv) != 4:
            print u'ERROR:参数个数不正确'
            os._exit(0)
        # Parse command line: target ip, file path (gbk-encoded), print type.
        ip, file_path, print_type = sys.argv[1], sys.argv[2].decode('gbk').strip(), int(sys.argv[3])
        if not os.path.isfile(file_path):
            raise BaseException(u'文件不存在')
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Connect to the receiver (fixed port 8899).
        s.connect((ip, 8899))
        # Send the file.
        transfer_file(s, file_path, print_type)
        s.close()
        print u'SUCCESS'
    except socket.gaierror as e:
        print u'ERROR:socket 连接错误'
    except BaseException as e:
        # Python 2 only: relies on BaseException.message.
        message = u'未知错误' if e.message == '' or e.message == None else e.message
        print u'ERROR:{}'.format(message)
|
import urllib2
import re
import slate
import pdfminer
# Base URL of the BIRT report service that renders a result sheet as a PDF;
# the student USN is appended as the final query parameter.
url="http://45.32.111.231:8080/birt/frameset?__report=mydsi/exam/Exam_Result_Sheet_dsce.rptdesign&__format=pdf&USN="
# USN prefix; main() appends a numeric suffix per student.
usn = "1DS15IS00"
# Current student counter; module-global so download_file() can log it.
i=0
def main():
    """Download and scrape result PDFs for three ranges of USN suffixes."""
    global i,usn
    # Single-digit suffixes 1..8 (prefix already carries the padding zeros).
    for i in range(1,9):
        download_file(url+usn+str(i))
    usn = "1DS15IS0"
    # Two-digit suffixes 10..98.
    for i in range(10,99):
        print(usn)
        download_file(url+usn+str(i))
    usn="1DS15IS"
    # Three-digit suffixes 134..199.
    for i in range(134,200):
        download_file(url+usn+str(i))
    return 0
    #download_file(url+"1DS15CS015")
def download_file(download_url):
    """Download one result PDF, extract name and SGPA, append to mydata.txt."""
    response = urllib2.urlopen(download_url)
    file = open("documentbast.pdf", 'wb')
    file.write(response.read())
    file.close()
    # Extract the PDF text with slate; str(doc) flattens the page list.
    with open('documentbast.pdf',"rb") as f:
        doc = slate.PDF(f)
    s1= str(doc)
    print(s1)
    f2 = open("mydata.txt","a")
    # Try several patterns for the student name; the PDF layout varies.
    res1 = re.findall(r"the Student:(\w+\s\w+)",s1)
    tstr=str(res1)
    if tstr == "[]":
        res1 = re.findall(r"CODE(\w+\s\w+)",s1)
        tstr=str(res1)
    if tstr == "[]":
        res1 = re.findall(r"the Student:(\w+)",s1)
        tstr=str(res1)
    # SGPA: a decimal number; fall back to a digit right after 'SGPA\n\n'.
    res2 = re.findall(r'(\d\.\d*)', s1)
    if str(res2)=="[]":
        res2 = re.findall(r'SGPA\\n\\n(\d)', s1)
    tstr = tstr + " "
    tstr=tstr + str(res2)
    # Strip the list-literal brackets/quotes from the stringified matches.
    tstr=re.sub("\[\'","",tstr,2)
    tstr=re.sub("\'\]","",tstr,2)
    print(tstr)
    # usn and i are module globals updated by main().
    f2.write(usn+str(i)+" ")
    f2.write(tstr)
    f2.write("\n")
    f2.close()
# Script entry point.
if __name__ == "__main__":
    main()
|
""" Ex - 047 - Crie um programa que mostre na tela todos os números pares que estão no intervalo de 1 a 50"""
# My solution
print(f'{"> Números Pares <":=^40}')
# Collect the even numbers with a comprehension instead of a manual loop.
num_par = [count for count in range(1, 51) if count % 2 == 0]
print('{} Acabou'.format(num_par))
# Professor Guanabara's solution: step the range by 2 directly.
for n in range(2, 51, 2):
    print(n, end=' ')
print('Acabou')
|
from flask import Flask, jsonify, request, render_template, redirect
import logging
from enum import Enum
app = Flask(__name__)
# Two-state flag polled by clients: Initial (idle) or NeedBootup (pending).
BootupStatus = Enum('BootupStatus', ('Initial', 'NeedBootup'))
# Module-global current status, mutated by the /bootup and /has-bootup routes.
current_bootup_status = BootupStatus.Initial
@app.route("/")
def index():
    """Render the landing page showing the current bootup status."""
    return render_template('index.html', current_bootup_status = current_bootup_status)
@app.route('/bootup', methods=['POST'])
def bootup():
    """Flag that a bootup is needed, then redirect back to the index page."""
    global current_bootup_status
    app.logger.info("Need Bootup ! ")
    current_bootup_status = BootupStatus.NeedBootup
    return redirect('/')
@app.route('/get-bootup-status', methods=['GET'])
def get_bootup_status():
    """Return the current status name ('Initial' or 'NeedBootup') as text."""
    return current_bootup_status.name
@app.route('/has-bootup', methods=['GET'])
def has_bootup():
    """Acknowledge a bootup: reset the status to Initial and return its name."""
    app.logger.info("Has Bootup ! ")
    global current_bootup_status
    current_bootup_status = BootupStatus.Initial
    return current_bootup_status.name
if __name__ == '__main__':
    # Development server.
    app.run(debug=True)
else:
    # Running under gunicorn: reuse gunicorn's log handlers and level.
    gunicorn_logger = logging.getLogger('gunicorn.error')
    app.logger.handlers = gunicorn_logger.handlers
    app.logger.setLevel(gunicorn_logger.level)
|
"""Heatmap and dendograms"""
import matplotlib
import pylab
import scipy.cluster.hierarchy as hierarchy
import scipy.spatial.distance as distance
import numpy as np # get rid of this dependence
import easydev
import colormap
from biokit.viz.linkage import Linkage
__all__ = ['Heatmap']
def get_heatmap_df():
    """a simple example to play with and perform test"""
    import pandas as pd
    columns = {
        'A': [1, 0, 1, 1],
        'B': [0.9, 0.1, 0.6, 1],
        'C': [0.5, 0.2, 0, 1],
        'D': [0.5, 0.2, 0, 1],
    }
    return pd.DataFrame(columns)
#def heatmap(data, *args, **kargs):
# """alias to Heatmap class"""
# h = Heatmap(data, *args, **kargs)
# h.plot()
# return h
class Heatmap(Linkage):
    """Heatmap and dendograms of an input matrix
    A heat map is an image representation of a matrix with a
    dendrogram added to the left side and to the top. Typically,
    reordering of the rows and columns according to some set of values
    (row or column means) within the restrictions imposed by the
    dendrogram is carried out.
    .. plot::
        :include-source:
        :width: 50%
        from biokit.viz import heatmap
        df = heatmap.get_heatmap_df()
        h = heatmap.Heatmap(df)
        h.plot()
    .. warning:: in progress
    """
    def __init__(self, data=None, row_method='complete', column_method='complete',
            row_metric='euclidean',column_metric='euclidean',
            cmap='yellow_black_blue',
            col_side_colors=None, row_side_colors=None,
            verbose=True
            ):
        """.. rubric:: constructor
        :param data: a dataframe or possibly a numpy matrix.
        .. todo:: if row_method id none, no ordering in the dendogram
        """
        # should be a copy since it may be reshuffled ?
        try:
            if data is None and verbose is True:
                print("No data provided, please fill the `df` attribute manually")
            else:
                self._df = data.copy()
        except AttributeError as err:
            print("input must be a pandas data frame or numpy matrix")
            raise(err)
        # Linkage methods/metrics for the row and column dendrograms;
        # validity is enforced by the property setters below.
        self._row_method = row_method
        self._column_method = column_method
        self._column_metric = column_metric
        self._row_metric = row_metric
        # some default parameters
        self.cluster_criterion = 'distance'
        self.params = easydev.AttrDict()
        self.params.col_side_colors = ['r', 'g', 'b', 'y', 'w', 'k', 'm']
        self.params.row_side_colors = ['r', 'g', 'b', 'y', 'w', 'k', 'm']
        self.params.cmap = cmap
        # Optional mappings from labels to integer categories; when set,
        # plot() draws the corresponding side color bars.
        self.category_row = None
        self.category_column = None
        if col_side_colors:
            self.params.col_side_colors = col_side_colors
        if row_side_colors:
            self.params.row_side_colors = row_side_colors
    def _get_df(self):
        return self._df
    def _set_df(self, data):
        # copy so plot() reshuffling does not touch the caller's dataframe
        self._df = data.copy()
    df = property(_get_df, _set_df)
    frame = property(_get_df, _set_df)  # alias of :attr:`df`
    def _get_row_method(self):
        return self._row_method
    def _set_row_method(self, value):
        self.check_method(value)
        self._row_method = value
    row_method = property(_get_row_method, _set_row_method)
    def _get_col_method(self):
        return self._column_method
    def _set_col_method(self, value):
        self.check_method(value)
        self._column_method = value
    column_method = property(_get_col_method, _set_col_method)
    def _get_col_metric(self):
        return self._column_metric
    def _set_col_metric(self, value):
        self.check_metric(value)
        self._column_metric = value
    column_metric = property(_get_col_metric, _set_col_metric)
    def _get_row_metric(self):
        return self._row_metric
    def _set_row_metric(self, value):
        self.check_metric(value)
        self._row_metric = value
    row_metric = property(_get_row_metric, _set_row_metric)
    def plot(self, num=1, cmap=None, colorbar=True, vmin=None,
                vmax=None, colorbar_position='right', gradient_span='None'
                ):
        """
        :param gradient_span: None is default in R
        Using::
            df = pd.DataFrame({'A':[1,0,1,1],
                    'B':[.9,0.1,.6,1],
                'C':[.5,.2,0,1],
                'D':[.5,.2,0,1]})
        and ::
            h = Heatmap(df)
            h.plot(vmin=0, vmax=1.1)
        we seem to get the same as in R wiht ::
            df = data.frame(A=c(1,0,1,1), B=c(.9,.1,.6,1), C=c(.5,.2,0,1), D=c(.5,.2,0,1))
            heatmap((as.matrix(df)), scale='none')
        .. todo:: right now, the order of cols and rows is random somehow.
            could be ordered like in heatmap (r) byt mean of the row and col
            or with a set of vector for col and rows.
            heatmap((as.matrix(df)), Rowv=c(3,2), Colv=c(1), scale='none')
            gives same as::
                df = get_heatmap_df()
                h = heatmap.Heatmap(df)
                h.plot(vmin=-0, vmax=1.1)
        """
        # save all parameters in a dict
        layout = {}
        if cmap is None:
            cmap = self.params.cmap
        # resolve a colormap name to a matplotlib colormap; keep as-is on failure
        try:cmap = colormap.cmap_builder(cmap)
        except:pass
        # keep track of row and column names for later.
        row_header = self.frame.index
        column_header = self.frame.columns
        # FIXME something clever for the fontsize
        if len(row_header) > 100 or len(column_header) > 100:
            matplotlib.rcParams['font.size'] = 6
        if len(row_header) > 50 or len(column_header) > 50:
            matplotlib.rcParams['font.size'] = 7
        else:
            matplotlib.rcParams['font.size'] = 12
        # NOTE(review): with >100 labels the second if/else above overrides
        # font.size back to 7 — an `elif` was probably intended; confirm.
        # scaling min/max range
        self.gradient_span = gradient_span #'only_max'
        # min_to_max, min_to_max_centered, only_max, only_min
        if self.gradient_span == 'min_to_max_centered':
            vmax = max([vmax, abs(vmin)])
            vmin = vmax * -1
        if self.gradient_span == 'only_max':
            vmin = 0
            vmax = self.frame.max().max()
        if self.gradient_span == 'only_min':
            vmin = self.frame.min().min()
            vmax = 0
        norm = matplotlib.colors.Normalize(vmin, vmax)
        # Scale the figure window size #
        fig = pylab.figure(num=num, figsize=(12, 8))
        fig.clf()
        # LAYOUT --------------------------------------------------
        # ax1 (dendrogram 1) on the left of the heatmap
        [ax1_x, ax1_y, ax1_w, ax1_h] = [0.05, 0.22, 0.2, 0.6]
        width_between_ax1_axr = 0.004
        # distance between the top color bar axis and the matrix
        height_between_ax1_axc = 0.004
        # Sufficient size to show
        color_bar_w = 0.015
        # axr, placement of row side colorbar
        # second to last controls the width of the side color bar - 0.015 when showing
        [axr_x, axr_y, axr_w, axr_h] = [0.31, 0.1, color_bar_w, 0.6]
        axr_x = ax1_x + ax1_w + width_between_ax1_axr
        axr_y = ax1_y; axr_h = ax1_h
        width_between_axr_axm = 0.004
        # axc, placement of column side colorbar #
        # last one controls the hight of the top color bar - 0.015 when showing
        [axc_x, axc_y, axc_w, axc_h] = [0.4, 0.63, 0.5, color_bar_w]
        axc_x = axr_x + axr_w + width_between_axr_axm
        axc_y = ax1_y + ax1_h + height_between_ax1_axc
        height_between_axc_ax2 = 0.004
        # axm, placement of heatmap for the data matrix # why larger than 1?
        [axm_x, axm_y, axm_w, axm_h] = [0.4, 0.9, 2.5, 0.5]
        axm_x = axr_x + axr_w + width_between_axr_axm
        axm_y = ax1_y; axm_h = ax1_h
        axm_w = axc_w
        # ax2 (dendrogram 2), on the top of the heatmap #
        [ax2_x, ax2_y, ax2_w, ax2_h] = [0.3, 0.72, 0.6, 0.15]
        ax2_x = axr_x + axr_w + width_between_axr_axm
        ax2_y = ax1_y + ax1_h + height_between_ax1_axc + axc_h + height_between_axc_ax2
        ax2_w = axc_w
        # axcb - placement of the color legend #
        if colorbar_position == 'top left':
            [axcb_x, axcb_y, axcb_w, axcb_h] = [0.07, 0.88, 0.18, 0.09]
        elif colorbar_position == 'right':
            [axcb_x, axcb_y, axcb_w, axcb_h] = [0.85, 0.2, 0.08, 0.6]
        else:
            raise ValueError("'top left' or 'right' accepted for now")
        # COMPUTATION DENDOGRAM 1 -------------------------------------
        # (columns dendrogram, drawn on top of the heatmap)
        if self.column_method:
            Y = self.linkage(self.frame.transpose(),self.column_method,
                    self.column_metric )
            ax2 = fig.add_axes([ax2_x, ax2_y, ax2_w, ax2_h], frame_on=True)
            Z = hierarchy.dendrogram(Y)
            ind2 = hierarchy.fcluster(Y, 0.7*max(Y[:,2]), self.cluster_criterion)
            ax2.set_xticks([])
            ax2.set_yticks([])
            # apply the clustering for the array-dendrograms to the actual matrix data
            idx2 = Z['leaves']
            self.frame = self.frame.iloc[:,idx2]
            # reorder the flat cluster to match the order of the leaves the dendrogram
            ind2 = ind2[idx2]
            layout['dendogram2'] = ax2
        else:
            idx2 = range(self.frame.shape[1])
        # COMPUTATION DENDOGRAM 2 ---------------------------------
        # (rows dendrogram, drawn on the left of the heatmap)
        if self.row_method:
            Y = self.linkage(self.frame, self.row_method, self.row_metric )
            ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=True)
            Z = hierarchy.dendrogram(Y, orientation='right')
            ind1 = hierarchy.fcluster(Y, 0.7*max(Y[:,2]), self.cluster_criterion)
            ax1.set_xticks([])
            ax1.set_yticks([])
            # apply the clustering for the array-dendrograms to the actual matrix data
            idx1 = Z['leaves']
            self.frame = self.frame.iloc[idx1,:]
            # reorder the flat cluster to match the order of the leaves the dendrogram
            ind1 = ind1[idx1]
            layout['dendogram1'] = ax1
        else:
            idx1 = range(self.frame.shape[0])
        # HEATMAP itself
        axm = fig.add_axes([axm_x, axm_y, axm_w, axm_h])
        axm.imshow(self.frame, aspect='auto', origin='lower', interpolation='None',
                cmap=cmap, norm=norm)
        axm.set_xticks([])
        axm.set_yticks([])
        layout['heatmap'] = axm
        # TEXT
        new_row_header = []
        new_column_header = []
        for i in range(self.frame.shape[0]):
            axm.text(self.frame.shape[1]-0.5, i, ' ' + str(row_header[idx1[i]]),
                verticalalignment="center")
            new_row_header.append(row_header[idx1[i]] if self.row_method else row_header[i])
        for i in range(self.frame.shape[1]):
            axm.text(i, -0.9, ' '+str(column_header[idx2[i]]),
                rotation=90, verticalalignment="top",
                horizontalalignment="center")
            new_column_header.append(column_header[idx2[i]] if self.column_method else column_header[i])
        # CATEGORY column ------------------------------
        if self.category_column:
            axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h])
            cmap_c = matplotlib.colors.ListedColormap(self.params.col_side_colors)
            category_col = [self.category_column[self.df.columns[i]] for i in idx2]
            dc = np.array(category_col, dtype=int)
            dc.shape = (1,len(ind2))
            axc.matshow(dc, aspect='auto', origin='lower', cmap=cmap_c)
            axc.set_xticks([])
            axc.set_yticks([])
            layout['category_column'] = axc
        # CATEGORY row -------------------------------
        if self.category_row:
            axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h])
            # self.category_row must be a dictionary with names as found in the columns
            # of the dataframe.
            # NOTE(review): indexing self.df.columns with the row order idx1
            # looks suspicious (self.df.index seems intended) — confirm
            # before relying on category_row.
            category_row = [self.category_row[self.df.columns[i]] for i in idx1]
            dr = np.array(category_row, dtype=int)
            dr.shape = (len(category_row),1)
            cmap_r = matplotlib.colors.ListedColormap(self.params.col_side_colors)
            axr.matshow(dr, aspect='auto', origin='lower', cmap=cmap_r)
            axr.set_xticks([])
            axr.set_yticks([])
            layout['category_row'] = axr
        # COLORBAR ----------------------
        if colorbar == True:
            axcb = fig.add_axes([axcb_x, axcb_y, axcb_w, axcb_h], frame_on=False)
            if colorbar_position == 'right':
                orientation = 'vertical'
            else:
                orientation = 'horizontal'
            cb = matplotlib.colorbar.ColorbarBase(axcb, cmap=cmap,
                    norm=norm, orientation=orientation)
            #axcb.set_title("whatever")
            #max_cb_ticks = 5
            #axcb.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(max_cb_ticks))
            layout['colorbar'] = cb
        # could be useful
        self.d = {'ordered': self.frame.copy(), 'rorder': idx1, 'corder': idx2}
        return layout
|
from django.http import HttpResponse
from django.views.generic import ListView, TemplateView
from .models import Player
class IndexView(TemplateView):
    """Render the static quarto landing page."""
    template_name = 'quarto/index.html'
class BoardView(TemplateView):
    """Render the quarto board with cells numbered 1..16."""
    template_name = 'quarto/board.html'
    context_object_name = 'board'

    def get_context_data(self, **kwargs):
        """Add the board cell range to the template context."""
        context = super().get_context_data(**kwargs)
        context['board'] = range(1, 17)
        # Bug fix: the context dict must be returned; previously this method
        # implicitly returned None, breaking template rendering.
        return context
class PlayersView(ListView):
    """List all players."""
    # Name under which the queryset is exposed to the template.
    context_object_name = 'players'
    def get_queryset(self):
        """Return every Player record."""
        return Player.objects.all()
|
# import Python's built-in JSON library
import json, sys
# import the psycopg2 database adapter for PostgreSQL
from psycopg2 import connect, Error
#Get necessary functions
from scryfall_get import *
# Entry point: call the Scryfall request helper (defined in scryfall_get).
scry_resp()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-11-15 19:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: renames the Substrate models to the more
    # general "benthic category" naming and rewires the Set relation.
    dependencies = [
        ('bruv', '0019_auto_20161025_2316'),
    ]
    operations = [
        # Remove the old `substrate` field before renaming the models.
        migrations.RemoveField(
            model_name='set',
            name='substrate',
        ),
        migrations.RenameModel(
            old_name='Substrate',
            new_name='BenthicCategory',
        ),
        migrations.RenameModel(
            old_name='HabitatSubstrate',
            new_name='BenthicCategoryValue',
        ),
        # Re-expose the relation as a many-to-many through BenthicCategoryValue.
        migrations.AddField(
            model_name='set',
            name='benthic_category',
            field=models.ManyToManyField(through='bruv.BenthicCategoryValue', to='bruv.BenthicCategory'),
        ),
    ]
|
from robocorp_ls_core.python_ls import PythonLanguageServer
from robocorp_ls_core.basic import overrides, log_and_silence_errors
import os
import time
from robotframework_ls.constants import DEFAULT_COMPLETIONS_TIMEOUT
from robocorp_ls_core.robotframework_log import get_logger
from typing import Any, Optional, List, Dict
from robocorp_ls_core.protocols import (
IConfig,
IWorkspace,
IIdMessageMatcher,
IRobotFrameworkApiClient,
IMonitor,
)
from pathlib import Path
from robotframework_ls.ep_providers import (
EPConfigurationProvider,
EPDirCacheProvider,
EPEndPointProvider,
)
from robocorp_ls_core.jsonrpc.endpoint import require_monitor
from robocorp_ls_core.jsonrpc.monitor import Monitor
from functools import partial
import itertools
from robotframework_ls import __version__, rf_interactive_integration
import typing
import sys
from robocorp_ls_core.watchdog_wrapper import IFSObserver
from robocorp_ls_core.lsp import CodeLensTypedDict
# Module logger.
log = get_logger(__name__)
# Debounce interval before an edited document is actually linted.
LINT_DEBOUNCE_S = 0.4  # 400 ms
class _CurrLintInfo(object):
    """One in-flight lint request for a document.

    Instances are callables scheduled on a worker thread; cancel() aborts a
    pending/ongoing request through the internal monitor.
    """
    def __init__(
        self,
        rf_lint_api_client: IRobotFrameworkApiClient,
        lsp_messages,
        doc_uri,
        is_saved,
    ) -> None:
        from robocorp_ls_core.lsp import LSPMessages
        self._rf_lint_api_client = rf_lint_api_client
        self.lsp_messages: LSPMessages = lsp_messages
        self.doc_uri = doc_uri
        self.is_saved = is_saved
        self._monitor = Monitor()
    def __call__(self) -> None:
        """Request lint results from the api client and publish diagnostics."""
        from robocorp_ls_core.jsonrpc.exceptions import JsonRpcRequestCancelled
        from robocorp_ls_core.client_base import wait_for_message_matcher
        from robotframework_ls.server_api.client import SubprocessDiedError
        try:
            doc_uri = self.doc_uri
            self._monitor.check_cancelled()
            found = []
            message_matcher = self._rf_lint_api_client.request_lint(doc_uri)
            if message_matcher is not None:
                # Wait up to 3 minutes for the lint result (cancellable
                # through the monitor).
                if wait_for_message_matcher(
                    message_matcher,
                    monitor=self._monitor,
                    request_cancel=self._rf_lint_api_client.request_cancel,
                    timeout=60 * 3,
                ):
                    diagnostics_msg = message_matcher.msg
                    if diagnostics_msg:
                        found = diagnostics_msg.get("result", [])
                    # Publish even when empty so stale diagnostics are cleared.
                    self.lsp_messages.publish_diagnostics(doc_uri, found)
        except JsonRpcRequestCancelled:
            log.info(f"Cancelled linting: {self.doc_uri}.")
        except SubprocessDiedError:
            log.info(f"Subprocess exited while linting: {self.doc_uri}.")
        except Exception:
            log.exception("Error linting.")
    def cancel(self):
        """Cancel a pending or in-progress lint."""
        self._monitor.cancel()
def run_in_new_thread(func, thread_name):
    """Run *func* asynchronously on a new thread named *thread_name*."""
    import threading

    worker = threading.Thread(target=func, name=thread_name)
    worker.start()
class _LintManager(object):
    """Debounces lint requests per document and allows cancelling them."""

    def __init__(self, server_manager, lsp_messages) -> None:
        from robotframework_ls.server_manager import ServerManager

        self._server_manager: ServerManager = server_manager
        self._lsp_messages = lsp_messages
        self._next_id = partial(next, itertools.count())
        # Maps doc_uri -> in-flight _CurrLintInfo so it can be cancelled.
        self._doc_id_to_info: Dict[str, _CurrLintInfo] = {}

    def schedule_lint(self, doc_uri: str, is_saved: bool) -> None:
        """Schedule a (debounced) lint for doc_uri, cancelling a previous one."""
        self.cancel_lint(doc_uri)
        rf_lint_api_client = self._server_manager.get_lint_rf_api_client(doc_uri)
        if rf_lint_api_client is None:
            log.info(f"Unable to get lint api for: {doc_uri}")
            return
        curr_info = _CurrLintInfo(
            rf_lint_api_client, self._lsp_messages, doc_uri, is_saved
        )
        # Bug fix: track the in-flight lint so cancel_lint() can actually
        # cancel it (previously the dict was never populated, so the pop in
        # cancel_lint always returned None).
        self._doc_id_to_info[doc_uri] = curr_info
        from robocorp_ls_core.timeouts import TimeoutTracker

        timeout_tracker = TimeoutTracker.get_singleton()
        timeout_tracker.call_on_timeout(
            LINT_DEBOUNCE_S, partial(run_in_new_thread, curr_info, f"Lint: {doc_uri}")
        )

    def cancel_lint(self, doc_uri: str) -> None:
        """Cancel a previously scheduled lint for doc_uri (if any)."""
        curr_info = self._doc_id_to_info.pop(doc_uri, None)
        if curr_info is not None:
            curr_info.cancel()
class RobotFrameworkLanguageServer(PythonLanguageServer):
def __init__(self, rx, tx) -> None:
from robocorp_ls_core.pluginmanager import PluginManager
from robotframework_ls.rf_interactive_integration import _RfInterpretersManager
from robotframework_ls.server_manager import ServerManager
from robotframework_ls.ep_providers import DefaultConfigurationProvider
from robotframework_ls.ep_providers import DefaultEndPointProvider
from robotframework_ls.ep_providers import DefaultDirCacheProvider
from robocorp_ls_core import watchdog_wrapper
from robocorp_ls_core.remote_fs_observer_impl import RemoteFSObserver
from robocorp_ls_core.options import Setup
PythonLanguageServer.__init__(self, rx, tx)
from robocorp_ls_core.cache import DirCache
from robotframework_ls import robot_config
home = robot_config.get_robotframework_ls_home()
cache_dir = os.path.join(home, ".cache")
log.debug(f"Cache dir: {cache_dir}")
self._dir_cache = DirCache(cache_dir)
self._pm = PluginManager()
self._config_provider = DefaultConfigurationProvider(self.config)
self._pm.set_instance(EPConfigurationProvider, self._config_provider)
self._pm.set_instance(
EPDirCacheProvider, DefaultDirCacheProvider(self._dir_cache)
)
self._pm.set_instance(
EPEndPointProvider, DefaultEndPointProvider(self._endpoint)
)
self._rf_interpreters_manager = _RfInterpretersManager(self._endpoint, self._pm)
watch_impl = os.environ.get("ROBOTFRAMEWORK_LS_WATCH_IMPL", "auto")
if watch_impl not in ("watchdog", "fsnotify", "auto"):
log.info(
f"ROBOTFRAMEWORK_LS_WATCH_IMPL should be 'auto', 'watchdog' or 'fsnotify'. Found: {watch_impl} (falling back to auto)"
)
watch_impl = "auto"
if watch_impl == "auto":
# In auto mode we use watchdog for windows and fsnotify (polling)
# for Linux and Mac. The reason for that is that on Linux and Mac
# if big folders are watched the system may complain due to the
# lack of resources, which may prevent the extension from working
# properly.
#
# If users want to opt-in, they can change to watchdog (and
# ideally install it to their env to get native extensions).
if sys.platform == "win32":
watch_impl = "watchdog"
else:
watch_impl = "fsnotify"
self._fs_observer = watchdog_wrapper.create_remote_observer(
watch_impl, (".py", ".libspec", "robot", ".resource")
)
remote_observer = typing.cast(RemoteFSObserver, self._fs_observer)
log_file = Setup.options.log_file
if not isinstance(log_file, str):
log_file = None
remote_observer.start_server(log_file=log_file)
self._server_manager = ServerManager(self._pm, language_server=self)
self._lint_manager = _LintManager(self._server_manager, self._lsp_messages)
    def get_remote_fs_observer_port(self) -> Optional[int]:
        """Return the port of the remote filesystem observer server."""
        from robocorp_ls_core.remote_fs_observer_impl import RemoteFSObserver
        remote_observer = typing.cast(RemoteFSObserver, self._fs_observer)
        return remote_observer.port
    @overrides(PythonLanguageServer._create_config)
    def _create_config(self) -> IConfig:
        """Create the Robot Framework specific configuration object."""
        from robotframework_ls.robot_config import RobotConfig
        return RobotConfig()
    @overrides(PythonLanguageServer._on_workspace_set)
    def _on_workspace_set(self, workspace: IWorkspace):
        """Propagate the new workspace to the managed api servers."""
        PythonLanguageServer._on_workspace_set(self, workspace)
        self._server_manager.set_workspace(workspace)
    @overrides(PythonLanguageServer._obtain_fs_observer)
    def _obtain_fs_observer(self) -> IFSObserver:
        """Reuse the filesystem observer created in __init__."""
        return self._fs_observer
    @overrides(PythonLanguageServer._create_workspace)
    def _create_workspace(
        self, root_uri: str, fs_observer: IFSObserver, workspace_folders
    ):
        """Create the Robot workspace (ASTs are not generated in this process)."""
        from robotframework_ls.impl.robot_workspace import RobotWorkspace
        return RobotWorkspace(
            root_uri, fs_observer, workspace_folders, generate_ast=False
        )
    def m_initialize(
        self,
        processId=None,
        rootUri=None,
        rootPath=None,
        initializationOptions=None,
        workspaceFolders=None,
        **_kwargs,
    ) -> dict:
        """Handle the LSP 'initialize' request.

        Delegates to the base class and additionally loads plugins from
        initializationOptions["pluginsDir"] when it points to a directory.
        """
        # capabilities = _kwargs.get("capabilities", {})
        # text_document_capabilities = capabilities.get("textDocument", {})
        # document_symbol_capabilities = text_document_capabilities.get(
        #     "documentSymbol", {}
        # )
        # hierarchical_document_symbol_support = document_symbol_capabilities.get(
        #     "hierarchicalDocumentSymbolSupport", False
        # )
        # self._hierarchical_document_symbol_support = (
        #     hierarchical_document_symbol_support
        # )
        ret = PythonLanguageServer.m_initialize(
            self,
            processId=processId,
            rootUri=rootUri,
            rootPath=rootPath,
            initializationOptions=initializationOptions,
            workspaceFolders=workspaceFolders,
            **_kwargs,
        )
        initialization_options = initializationOptions
        if initialization_options:
            plugins_dir = initialization_options.get("pluginsDir")
            if isinstance(plugins_dir, str):
                if not os.path.isdir(plugins_dir):
                    log.critical(f"Expected: {plugins_dir} to be a directory.")
                else:
                    self._pm.load_plugins_from(Path(plugins_dir))
        return ret
    @overrides(PythonLanguageServer.capabilities)
    def capabilities(self):
        """Return the LSP server capabilities advertised to the client."""
        from robocorp_ls_core.lsp import TextDocumentSyncKind
        from robotframework_ls.impl.semantic_tokens import TOKEN_TYPES, TOKEN_MODIFIERS
        from robotframework_ls import commands
        server_capabilities = {
            "codeActionProvider": False,
            "codeLensProvider": {"resolveProvider": True},
            "completionProvider": {
                "resolveProvider": False  # We know everything ahead of time
            },
            "documentFormattingProvider": True,
            "documentHighlightProvider": False,
            "documentRangeFormattingProvider": False,
            "documentSymbolProvider": True,
            "definitionProvider": True,
            "executeCommandProvider": {
                "commands": [
                    "robot.addPluginsDir",
                    "robot.resolveInterpreter",
                    "robot.getLanguageServerVersion",
                    "robot.getInternalInfo",
                    "robot.listTests",
                ]
                + commands.ALL_SERVER_COMMANDS
            },
            "hoverProvider": True,
            "referencesProvider": False,
            "renameProvider": False,
            "foldingRangeProvider": True,
            # Note that there are no auto-trigger characters (there's no good
            # character as there's no `(` for parameters and putting it as a
            # space becomes a bit too much).
            "signatureHelpProvider": {"triggerCharacters": []},
            "textDocumentSync": {
                "change": TextDocumentSyncKind.INCREMENTAL,
                "save": {"includeText": False},
                "openClose": True,
            },
            "workspace": {
                "workspaceFolders": {"supported": True, "changeNotifications": True}
            },
            "workspaceSymbolProvider": True,
            # The one below isn't accepted by lsp4j (it's still in LSP 3.15.0).
            # "workspaceSymbolProvider": {"workDoneProgress": False},
            "semanticTokensProvider": {
                "legend": {
                    "tokenTypes": TOKEN_TYPES,
                    "tokenModifiers": TOKEN_MODIFIERS,
                },
                "range": False,
                "full": True,
            },
        }
        log.info("Server capabilities: %s", server_capabilities)
        return server_capabilities
    def m_workspace__execute_command(self, command=None, arguments=()) -> Any:
        """Dispatch a workspace/executeCommand request by command name."""
        if command == "robot.addPluginsDir":
            directory: str = arguments[0]
            assert os.path.isdir(directory), f"Expected: {directory} to be a directory."
            self._pm.load_plugins_from(Path(directory))
            return True
        elif command == "robot.getInternalInfo":
            # Diagnostics info: settings, open docs and our process id.
            in_memory_docs = []
            workspace = self.workspace
            if workspace:
                for doc in workspace.iter_documents():
                    in_memory_docs.append({"uri": doc.uri})
            return {
                "settings": self.config.get_full_settings(),
                "inMemoryDocs": in_memory_docs,
                "processId": os.getpid(),
            }
        elif command == "robot.resolveInterpreter":
            try:
                from robocorp_ls_core import uris
                from robotframework_ls.ep_resolve_interpreter import (
                    EPResolveInterpreter,
                )
                from robotframework_ls.ep_resolve_interpreter import IInterpreterInfo
                target_robot: str = arguments[0]
                # First plugin able to resolve an interpreter for the doc wins.
                for ep in self._pm.get_implementations(EPResolveInterpreter):
                    interpreter_info: IInterpreterInfo = (
                        ep.get_interpreter_info_for_doc_uri(
                            uris.from_fs_path(target_robot)
                        )
                    )
                    if interpreter_info is not None:
                        return {
                            "pythonExe": interpreter_info.get_python_exe(),
                            "environ": interpreter_info.get_environ(),
                            "additionalPythonpathEntries": interpreter_info.get_additional_pythonpath_entries(),
                        }
            except:
                log.exception(f"Error resolving interpreter. Args: {arguments}")
        elif command == "robot.getLanguageServerVersion":
            return __version__
        elif command.startswith("robot.internal.rfinteractive."):
            return rf_interactive_integration.execute_command(
                command, self, self._rf_interpreters_manager, arguments
            )
        elif command == "robot.listTests":
            doc_uri = arguments[0]["uri"]
            rf_api_client = self._server_manager.get_others_api_client(doc_uri)
            if rf_api_client is not None:
                # Returned partial is executed asynchronously by the endpoint.
                func = partial(
                    self._async_api_request,
                    rf_api_client,
                    "request_list_tests",
                    doc_uri=doc_uri,
                )
                func = require_monitor(func)
                return func
            log.info("Unable to list tests (no api available).")
            return []
    @overrides(PythonLanguageServer.m_workspace__did_change_configuration)
    @log_and_silence_errors(log)
    def m_workspace__did_change_configuration(self, **kwargs):
        """Apply new settings and propagate them to the api servers."""
        PythonLanguageServer.m_workspace__did_change_configuration(self, **kwargs)
        self._server_manager.set_config(self.config)
# --- Methods to forward to the api
    @overrides(PythonLanguageServer.m_shutdown)
    @log_and_silence_errors(log)
    def m_shutdown(self, **kwargs):
        """Dispose the fs observer and api servers, then default shutdown."""
        try:
            from robocorp_ls_core.remote_fs_observer_impl import RemoteFSObserver
            remote_observer = typing.cast(RemoteFSObserver, self._fs_observer)
            remote_observer.dispose()
        except Exception:
            log.exception("Error disposing RemoteFSObserver.")
        self._server_manager.shutdown()
        PythonLanguageServer.m_shutdown(self, **kwargs)
    @overrides(PythonLanguageServer.m_exit)
    @log_and_silence_errors(log)
    def m_exit(self, **kwargs):
        """Exit the api servers before the default exit handling."""
        self._server_manager.exit()
        PythonLanguageServer.m_exit(self, **kwargs)
    def m_text_document__formatting(
        self, textDocument=None, options=None
    ) -> Optional[list]:
        """Request document formatting from the api and wait for the result.

        Returns a list of text edits (possibly empty) or raises RuntimeError
        on timeout.
        """
        doc_uri = textDocument["uri"]
        source_format_rf_api_client = self._server_manager.get_others_api_client(
            doc_uri
        )
        if source_format_rf_api_client is None:
            log.info("Unable to get API for source format.")
            return []
        message_matcher = source_format_rf_api_client.request_source_format(
            text_document=textDocument, options=options
        )
        if message_matcher is None:
            raise RuntimeError(
                "Error requesting code formatting (message_matcher==None)."
            )
        curtime = time.time()
        maxtime = curtime + DEFAULT_COMPLETIONS_TIMEOUT
        # i.e.: wait X seconds for the code format and bail out if we
        # can't get it.
        available_time = maxtime - time.time()
        if available_time <= 0:
            raise RuntimeError("Code formatting timed-out (available_time <= 0).")
        if message_matcher.event.wait(available_time):
            msg = message_matcher.msg
            if msg is not None:
                result = msg.get("result")
                if result:
                    return result
                else:
                    return []
        raise RuntimeError("Code formatting timed-out.")
    @overrides(PythonLanguageServer.m_text_document__did_close)
    def m_text_document__did_close(self, textDocument=None, **_kwargs):
        """Forward didClose to the api subprocesses, then default handling."""
        self._server_manager.forward(
            ("api", "lint", "others"),
            "textDocument/didClose",
            {"textDocument": textDocument},
        )
        PythonLanguageServer.m_text_document__did_close(
            self, textDocument=textDocument, **_kwargs
        )
    @overrides(PythonLanguageServer.m_text_document__did_open)
    def m_text_document__did_open(self, textDocument=None, **_kwargs):
        """Forward didOpen to the api subprocesses, then default handling."""
        self._server_manager.forward(
            ("api", "lint", "others"),
            "textDocument/didOpen",
            {"textDocument": textDocument},
        )
        PythonLanguageServer.m_text_document__did_open(
            self, textDocument=textDocument, **_kwargs
        )
    @overrides(PythonLanguageServer.m_text_document__did_change)
    def m_text_document__did_change(
        self, contentChanges=None, textDocument=None, **_kwargs
    ):
        """Forward didChange to the api subprocesses, then default handling."""
        self._server_manager.forward(
            ("api", "lint", "others"),
            "textDocument/didChange",
            {"contentChanges": contentChanges, "textDocument": textDocument},
        )
        PythonLanguageServer.m_text_document__did_change(
            self, contentChanges=contentChanges, textDocument=textDocument, **_kwargs
        )
    @overrides(PythonLanguageServer.m_workspace__did_change_workspace_folders)
    def m_workspace__did_change_workspace_folders(self, event=None, **_kwargs):
        """Forward workspace folder changes to the api subprocesses."""
        self._server_manager.forward(
            ("api", "lint", "others"),
            "workspace/didChangeWorkspaceFolders",
            {"event": event},
        )
        PythonLanguageServer.m_workspace__did_change_workspace_folders(
            self, event=event, **_kwargs
        )
# --- Customized implementation
    @overrides(PythonLanguageServer.lint)
    def lint(self, doc_uri, is_saved) -> None:
        """Schedule a debounced lint for the given document."""
        self._lint_manager.schedule_lint(doc_uri, is_saved)
    @overrides(PythonLanguageServer.cancel_lint)
    def cancel_lint(self, doc_uri) -> None:
        """Cancel a previously scheduled lint for the given document."""
        self._lint_manager.cancel_lint(doc_uri)
    def m_text_document__completion(self, **kwargs):
        """Return a monitored partial that computes completions asynchronously."""
        doc_uri = kwargs["textDocument"]["uri"]
        # Note: 0-based
        line, col = kwargs["position"]["line"], kwargs["position"]["character"]
        rf_api_client = self._server_manager.get_regular_rf_api_client(doc_uri)
        if rf_api_client is not None:
            func = partial(
                self._threaded_document_completion, rf_api_client, doc_uri, line, col
            )
            func = require_monitor(func)
            return func
        log.info("Unable to get completions (no api available).")
        return []
    @log_and_silence_errors(log, return_on_error=[])
    def _threaded_document_completion(
        self,
        rf_api_client: IRobotFrameworkApiClient,
        doc_uri: str,
        line: int,
        col: int,
        monitor: IMonitor,
    ) -> list:
        """Gather completions from the api subprocess and local providers.

        Runs on a worker thread; line/col are 0-based.
        """
        from robotframework_ls.impl.completion_context import CompletionContext
        from robotframework_ls.impl import section_completions
        from robotframework_ls.impl import snippets_completions
        from robocorp_ls_core.client_base import wait_for_message_matchers
        ws = self.workspace
        if not ws:
            log.critical("Workspace must be set before returning completions.")
            return []
        document = ws.get_document(doc_uri, accept_from_file=True)
        if document is None:
            log.critical("Unable to find document (%s) for completions." % (doc_uri,))
            return []
        ctx = CompletionContext(document, line, col, config=self.config)
        completions = []
        # Asynchronous completion.
        message_matchers: List[Optional[IIdMessageMatcher]] = []
        message_matchers.append(rf_api_client.request_complete_all(doc_uri, line, col))
        # These run locally (no need to get from the server).
        completions.extend(section_completions.complete(ctx))
        completions.extend(snippets_completions.complete(ctx))
        accepted_message_matchers = wait_for_message_matchers(
            message_matchers,
            monitor,
            rf_api_client.request_cancel,
            DEFAULT_COMPLETIONS_TIMEOUT,
        )
        for message_matcher in accepted_message_matchers:
            msg = message_matcher.msg
            if msg is not None:
                result = msg.get("result")
                if result:
                    completions.extend(result)
        return completions
@log_and_silence_errors(log)
def _async_api_request(
    self,
    rf_api_client: IRobotFrameworkApiClient,
    request_method_name: str,
    doc_uri: str,
    monitor: IMonitor,
    **kwargs,
):
    """Issue `request_method_name(doc_uri, **kwargs)` on the api client and await its result.

    Validates that the workspace/document exist first; returns the request's
    "result" payload, or None on any failure/timeout.
    """
    from robocorp_ls_core.client_base import wait_for_message_matcher

    request = getattr(rf_api_client, request_method_name)

    workspace = self.workspace
    if not workspace:
        log.critical(
            "Workspace must be set before calling %s.", request_method_name
        )
        return None

    if workspace.get_document(doc_uri, accept_from_file=True) is None:
        log.critical(
            "Unable to find document (%s) for %s." % (doc_uri, request_method_name)
        )
        return None

    # Asynchronous completion.
    message_matcher: Optional[IIdMessageMatcher] = request(doc_uri, **kwargs)
    if message_matcher is None:
        log.debug("Message matcher for %s returned None.", request_method_name)
        return None

    if not wait_for_message_matcher(
        message_matcher,
        rf_api_client.request_cancel,
        DEFAULT_COMPLETIONS_TIMEOUT,
        monitor,
    ):
        return None

    msg = message_matcher.msg
    if msg is not None:
        result = msg.get("result")
        if result:
            return result
    return None
@log_and_silence_errors(log)
def _async_api_request_no_doc(
    self,
    rf_api_client: IRobotFrameworkApiClient,
    request_method_name: str,
    monitor: Optional[IMonitor],
    **kwargs,
):
    """Issue `request_method_name(**kwargs)` on the api client and await its result.

    Same as `_async_api_request` but without validating a document first.
    Returns the request's "result" payload, or None on any failure/timeout.
    """
    from robocorp_ls_core.client_base import wait_for_message_matcher

    request = getattr(rf_api_client, request_method_name)

    # Asynchronous completion.
    message_matcher: Optional[IIdMessageMatcher] = request(**kwargs)
    if message_matcher is None:
        log.debug("Message matcher for %s returned None.", request_method_name)
        return None

    if not wait_for_message_matcher(
        message_matcher,
        rf_api_client.request_cancel,
        DEFAULT_COMPLETIONS_TIMEOUT,
        monitor,
    ):
        return None

    msg = message_matcher.msg
    if msg is not None:
        result = msg.get("result")
        if result:
            return result
    return None
def m_text_document__definition(self, **kwargs):
    """Handle textDocument/definition via the regular rf api client."""
    position = kwargs["position"]
    doc_uri = kwargs["textDocument"]["uri"]
    # Note: 0-based
    line = position["line"]
    col = position["character"]
    rf_api_client = self._server_manager.get_regular_rf_api_client(doc_uri)
    if rf_api_client is None:
        log.info("Unable to find definition (no api available).")
        return None
    return require_monitor(
        partial(
            self._async_api_request,
            rf_api_client,
            "request_find_definition",
            doc_uri=doc_uri,
            line=line,
            col=col,
        )
    )
def m_text_document__signature_help(self, **kwargs):
"""
"params": {
"textDocument": {
"uri": "file:///x%3A/vscode-robot/local_test/Basic/resources/keywords.robot"
},
"position": {"line": 7, "character": 22},
"context": {
"isRetrigger": False,
"triggerCharacter": " ",
"triggerKind": 2,
},
},
"""
doc_uri = kwargs["textDocument"]["uri"]
# Note: 0-based
line, col = kwargs["position"]["line"], kwargs["position"]["character"]
rf_api_client = self._server_manager.get_regular_rf_api_client(doc_uri)
if rf_api_client is not None:
func = partial(
self._async_api_request,
rf_api_client,
"request_signature_help",
doc_uri=doc_uri,
line=line,
col=col,
)
func = require_monitor(func)
return func
log.info("Unable to get signature (no api available).")
return []
def m_text_document__folding_range(self, **kwargs):
"""
"params": {
"textDocument": {
"uri": "file:///x%3A/vscode-robot/local_test/Basic/resources/keywords.robot"
},
},
"""
doc_uri = kwargs["textDocument"]["uri"]
rf_api_client = self._server_manager.get_others_api_client(doc_uri)
if rf_api_client is not None:
func = partial(
self._async_api_request,
rf_api_client,
"request_folding_range",
doc_uri=doc_uri,
)
func = require_monitor(func)
return func
log.info("Unable to get folding range (no api available).")
return []
def m_text_document__code_lens(self, **kwargs):
    """Handle textDocument/codeLens via the "others" api client."""
    doc_uri = kwargs["textDocument"]["uri"]
    rf_api_client = self._server_manager.get_others_api_client(doc_uri)
    if rf_api_client is None:
        log.info("Unable to get code lens (no api available).")
        return []
    return require_monitor(
        partial(
            self._async_api_request,
            rf_api_client,
            "request_code_lens",
            doc_uri=doc_uri,
        )
    )
def m_code_lens__resolve(self, **kwargs):
    """Handle codeLens/resolve.

    Only lenses still missing a "command" (with a dict "data") are resolved
    through the api; anything else is returned unchanged.
    """
    code_lens: CodeLensTypedDict = kwargs
    data = code_lens.get("data")
    if code_lens.get("command") is None and isinstance(data, dict):
        # For the interactive shell we need to resolve the arguments.
        rf_api_client = self._server_manager.get_others_api_client(data.get("uri"))
        if rf_api_client is not None:
            return require_monitor(
                partial(
                    self._async_api_request_no_doc,
                    rf_api_client,
                    "request_resolve_code_lens",
                    code_lens=code_lens,
                )
            )
    log.info("Unable to resolve code lens (no api available).")
    return code_lens
def m_text_document__document_symbol(self, **kwargs):
    """Handle textDocument/documentSymbol via the "others" api client."""
    doc_uri = kwargs["textDocument"]["uri"]
    rf_api_client = self._server_manager.get_others_api_client(doc_uri)
    if rf_api_client is None:
        log.info("Unable to get document symbol (no api available).")
        return []
    return require_monitor(
        partial(
            self._async_api_request,
            rf_api_client,
            "request_document_symbol",
            doc_uri=doc_uri,
        )
    )
def m_text_document__hover(self, **kwargs):
    """Handle textDocument/hover via the regular rf api client."""
    position = kwargs["position"]
    doc_uri = kwargs["textDocument"]["uri"]
    # Note: 0-based
    line = position["line"]
    col = position["character"]
    rf_api_client = self._server_manager.get_regular_rf_api_client(doc_uri)
    if rf_api_client is None:
        log.info("Unable to compute hover (no api available).")
        return []
    return require_monitor(
        partial(
            self._async_api_request,
            rf_api_client,
            "request_hover",
            doc_uri=doc_uri,
            line=line,
            col=col,
        )
    )
def m_text_document__semantic_tokens__range(self, textDocument=None, range=None):
    """Ranged semantic-tokens requests are not supported (only full requests are)."""
    raise RuntimeError("Not currently implemented!")
def m_text_document__semantic_tokens__full(self, textDocument=None):
    """Handle textDocument/semanticTokens/full via the "others" api client."""
    doc_uri = textDocument["uri"]
    api = self._server_manager.get_others_api_client(doc_uri)
    if api is None:
        log.info("Unable to get api client when computing semantic tokens (full).")
        # Empty response per the LSP semantic tokens shape.
        return {"resultId": None, "data": []}
    return require_monitor(
        partial(
            self._async_api_request_no_doc,
            api,
            "request_semantic_tokens_full",
            text_document=textDocument,
        )
    )
def m_workspace__symbol(self, query: Optional[str] = None) -> Any:
    """Handle workspace/symbol via the "others" api client."""
    api = self._server_manager.get_others_api_client("")
    if api is None:
        log.info("Unable to search workspace symbols (no api available).")
        return None
    return require_monitor(
        partial(
            self._async_api_request_no_doc,
            api,
            "request_workspace_symbols",
            query=query,
        )
    )
|
#----------------------------------------------------------------------
# Copyright (c) 2014-2016, Persistent Objects Ltd http://p-o.co.uk/
#
# License: BSD
#----------------------------------------------------------------------
"""
WSGI config for mldemo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
#pylint: disable=invalid-name
import os
import sys
# Project root: two directory levels above this file (realpath resolves symlinks).
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
BASE_DIR = os.path.dirname(os.path.realpath(BASE_DIR))
# Make the project importable before Django is loaded.
sys.path.append(BASE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mldemo.settings")
# NOTE: this import must stay *after* the sys.path/DJANGO_SETTINGS_MODULE
# setup above, or Django would fail to locate the settings module.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
pattern_zero=[0.0, 0.01586888658, 0.03121748179, 0.03225806452, 0.04604578564, 0.04812695109, 0.06035379813, 0.06347554631, 0.06451612903, 0.07414151925, 0.07830385016, 0.08038501561, 0.08740894901, 0.09261186264, 0.09573361082, 0.09677419355, 0.10015608741, 0.10639958377, 0.11056191467, 0.11238293444, 0.11264308013, 0.11966701353, 0.12408949011, 0.12486992716, 0.12799167534, 0.12903225807, 0.13241415193, 0.13527575442, 0.13865764828, 0.14281997919, 0.14464099896, 0.14490114464, 0.14594172737, 0.15192507804, 0.15608740895, 0.15634755463, 0.15712799168, 0.16024973985, 0.16129032258, 0.16467221644, 0.16571279917, 0.16753381894, 0.1709157128, 0.17481789802, 0.1750780437, 0.17689906348, 0.17715920916, 0.17819979188, 0.18340270552, 0.18418314256, 0.18834547347, 0.18860561915, 0.18938605619, 0.19146722164, 0.19250780437, 0.1935483871, 0.19693028096, 0.19797086368, 0.19901144641, 0.19979188346, 0.20317377732, 0.20603537981, 0.20707596254, 0.20733610822, 0.20915712799, 0.20941727367, 0.2104578564, 0.21253902185, 0.21566077003, 0.21644120708, 0.21852237253, 0.22060353798, 0.22086368366, 0.22164412071, 0.22372528616, 0.22398543184, 0.22476586889, 0.22580645161, 0.22892819979, 0.22918834547, 0.2302289282, 0.23126951093, 0.23204994797, 0.23335067638, 0.23543184183, 0.2372528616, 0.23829344433, 0.23933402706, 0.23959417274, 0.24063475546, 0.24141519251, 0.24167533819, 0.24271592092, 0.24349635796, 0.24479708637, 0.2458376691, 0.24765868887, 0.24791883455, 0.24869927159, 0.24895941727, 0.24973985432, 0.25, 0.25078043705, 0.2528616025, 0.25312174818, 0.25390218522, 0.25598335068, 0.25624349636, 0.2570239334, 0.25806451613, 0.26118626431, 0.26144640999, 0.26248699272, 0.26352757544, 0.26430801249, 0.2656087409, 0.26768990635, 0.26951092612, 0.27055150885, 0.27159209157, 0.27185223725, 0.27289281998, 0.27367325702, 0.27393340271, 0.27497398543, 0.27575442248, 0.27705515088, 0.27809573361, 0.27991675338, 0.28017689906, 0.28095733611, 0.28121748179, 0.28199791884, 0.28225806452, 
0.28303850156, 0.28511966701, 0.2853798127, 0.28616024974, 0.28824141519, 0.28850156087, 0.28928199792, 0.29032258065, 0.29344432882, 0.29370447451, 0.29474505723, 0.29578563996, 0.296566077, 0.29786680541, 0.29994797086, 0.30176899064, 0.30280957336, 0.30385015609, 0.30411030177, 0.3051508845, 0.30593132154, 0.30619146722, 0.30723204995, 0.30801248699, 0.3093132154, 0.31035379813, 0.3121748179, 0.31243496358, 0.31321540062, 0.31347554631, 0.31425598335, 0.31451612903, 0.31529656608, 0.31737773153, 0.31763787721, 0.31841831426, 0.32049947971, 0.32075962539, 0.32154006244, 0.32258064516, 0.32570239334, 0.32596253902, 0.32700312175, 0.32804370448, 0.32882414152, 0.33012486993, 0.33220603538, 0.33402705515, 0.33506763788, 0.3361082206, 0.33636836629, 0.33740894901, 0.33818938606, 0.33844953174, 0.33949011446, 0.34027055151, 0.34157127992, 0.34261186264, 0.34443288241, 0.3446930281, 0.34547346514, 0.34573361082, 0.34651404787, 0.34677419355, 0.34755463059, 0.34963579605, 0.34989594173, 0.35067637877, 0.35275754423, 0.35301768991, 0.35379812695, 0.35483870968, 0.35796045786, 0.35822060354, 0.35926118626, 0.36030176899, 0.36108220604, 0.36238293444, 0.3644640999, 0.36628511967, 0.36732570239, 0.36836628512, 0.3686264308, 0.36966701353, 0.37044745057, 0.37070759625, 0.37174817898, 0.37252861603, 0.37382934443, 0.37486992716, 0.37669094693, 0.37695109261, 0.37773152966, 0.37799167534, 0.37877211238, 0.37903225807, 0.37981269511, 0.38189386056, 0.38215400624, 0.38293444329, 0.38501560874, 0.38527575442, 0.38605619147, 0.38709677419, 0.39021852237, 0.39047866805, 0.39151925078, 0.39255983351, 0.39334027055, 0.39464099896, 0.39672216441, 0.39854318418, 0.39958376691, 0.40062434964, 0.40088449532, 0.40192507804, 0.40270551509, 0.40296566077, 0.4040062435, 0.40478668054, 0.40608740895, 0.40712799168, 0.40894901145, 0.40920915713, 0.40998959417, 0.41024973985, 0.4110301769, 0.41129032258, 0.41207075963, 0.41415192508, 0.41441207076, 0.4151925078, 0.41727367326, 0.41753381894, 
0.41831425598, 0.41935483871, 0.42247658689, 0.42273673257, 0.4237773153, 0.42481789802, 0.42559833507, 0.42689906348, 0.42898022893, 0.4308012487, 0.43184183143, 0.43288241415, 0.43314255983, 0.43418314256, 0.43496357961, 0.43522372529, 0.43626430801, 0.43704474506, 0.43834547347, 0.43938605619, 0.44120707596, 0.44146722164, 0.44224765869, 0.44250780437, 0.44328824142, 0.4435483871, 0.44432882414, 0.44640998959, 0.44667013528, 0.44745057232, 0.44953173777, 0.44979188346, 0.4505723205, 0.45161290323, 0.45473465141, 0.45499479709, 0.45603537981, 0.45707596254, 0.45785639958, 0.45915712799, 0.46123829344, 0.46305931322, 0.46409989594, 0.46514047867, 0.46540062435, 0.46644120708, 0.46722164412, 0.4674817898, 0.46852237253, 0.46930280957, 0.47060353798, 0.47164412071, 0.47346514048, 0.47372528616, 0.47450572321, 0.47476586889, 0.47554630593, 0.47580645161, 0.47658688866, 0.47866805411, 0.47892819979, 0.47970863684, 0.48178980229, 0.48204994797, 0.48283038502, 0.48387096774, 0.48699271592, 0.4872528616, 0.48829344433, 0.48933402706, 0.4901144641, 0.49141519251, 0.49349635796, 0.49531737773, 0.49635796046, 0.49739854318, 0.49765868887, 0.49869927159, 0.49947970864, 0.49973985432, 0.50078043705, 0.50156087409, 0.5028616025, 0.50390218522, 0.505723205, 0.50598335068, 0.50676378772, 0.5070239334, 0.50780437045, 0.50806451613, 0.50884495317, 0.51092611863, 0.51118626431, 0.51196670135, 0.51404786681, 0.51430801249, 0.51508844953, 0.51612903226, 0.51925078044, 0.51951092612, 0.52055150885, 0.52159209157, 0.52237252862, 0.52367325702, 0.52575442248, 0.52757544225, 0.52861602497, 0.5296566077, 0.52991675338, 0.53095733611, 0.53173777315, 0.53199791884, 0.53303850156, 0.53381893861, 0.53511966701, 0.53616024974, 0.53798126951, 0.53824141519, 0.53902185224, 0.53928199792, 0.54006243496, 0.54032258065, 0.54110301769, 0.54318418314, 0.54344432882, 0.54422476587, 0.54630593132, 0.546566077, 0.54734651405, 0.54838709677, 0.55150884495, 0.55176899064, 0.55280957336, 0.55385015609, 
0.55463059313, 0.55593132154, 0.55801248699, 0.55983350676, 0.56087408949, 0.56191467222, 0.5621748179, 0.56321540062, 0.56399583767, 0.56425598335, 0.56529656608, 0.56607700312, 0.56737773153, 0.56841831426, 0.57023933403, 0.57049947971, 0.57127991675, 0.57154006244, 0.57232049948, 0.57258064516, 0.57336108221, 0.57544224766, 0.57570239334, 0.57648283039, 0.57856399584, 0.57882414152, 0.57960457856, 0.58064516129, 0.58376690947, 0.58402705515, 0.58506763788, 0.5861082206, 0.58688865765, 0.58818938606, 0.59027055151, 0.59209157128, 0.59313215401, 0.59417273673, 0.59443288241, 0.59547346514, 0.59625390219, 0.59651404787, 0.59755463059, 0.59833506764, 0.59963579605, 0.60067637877, 0.60249739854, 0.60275754423, 0.60353798127, 0.60379812695, 0.604578564, 0.60483870968, 0.60561914672, 0.60770031218, 0.60796045786, 0.6087408949, 0.61082206035, 0.61108220604, 0.61186264308, 0.61290322581, 0.61602497399, 0.61628511967, 0.61732570239, 0.61836628512, 0.61914672216, 0.62044745057, 0.62252861603, 0.6243496358, 0.62539021852, 0.62643080125, 0.62669094693, 0.62773152966, 0.6285119667, 0.62877211238, 0.62981269511, 0.63059313215, 0.63189386056, 0.63293444329, 0.63475546306, 0.63501560874, 0.63579604579, 0.63605619147, 0.63683662851, 0.63709677419, 0.63787721124, 0.63995837669, 0.64021852237, 0.64099895942, 0.64308012487, 0.64334027055, 0.6441207076, 0.64516129032, 0.6482830385, 0.64854318418, 0.64958376691, 0.65062434964, 0.65140478668, 0.65270551509, 0.65478668054, 0.65660770031, 0.65764828304, 0.65868886577, 0.65894901145, 0.65998959417, 0.66077003122, 0.6610301769, 0.66207075963, 0.66285119667, 0.66415192508, 0.6651925078, 0.66701352758, 0.66727367326, 0.6680541103, 0.66831425598, 0.66909469303, 0.66935483871, 0.67013527575, 0.67221644121, 0.67247658689, 0.67325702393, 0.67533818939, 0.67559833507, 0.67637877211, 0.67741935484, 0.68054110302, 0.6808012487, 0.68184183143, 0.68288241415, 0.6836628512, 0.68496357961, 0.68704474506, 0.68886576483, 0.68990634756, 0.69094693028, 
0.69120707596, 0.69224765869, 0.69302809573, 0.69328824142, 0.69432882414, 0.69510926119, 0.69640998959, 0.69745057232, 0.69927159209, 0.69953173777, 0.70031217482, 0.7005723205, 0.70135275754, 0.70161290323, 0.70239334027, 0.70447450572, 0.70473465141, 0.70551508845, 0.7075962539, 0.70785639958, 0.70863683663, 0.70967741936, 0.71279916753, 0.71305931322, 0.71409989594, 0.71514047867, 0.71592091571, 0.71722164412, 0.71930280957, 0.72112382934, 0.72216441207, 0.7232049948, 0.72346514048, 0.72450572321, 0.72528616025, 0.72554630593, 0.72658688866, 0.7273673257, 0.72866805411, 0.72970863684, 0.73152965661, 0.73178980229, 0.73257023933, 0.73283038502, 0.73361082206, 0.73387096774, 0.73465140479, 0.73673257024, 0.73699271592, 0.73777315297, 0.73985431842, 0.7401144641, 0.74089490115, 0.74193548387, 0.74505723205, 0.74531737773, 0.74635796046, 0.74739854318, 0.74817898023, 0.74947970864, 0.75156087409, 0.75338189386, 0.75442247659, 0.75546305931, 0.755723205, 0.75676378772, 0.75754422477, 0.75780437045, 0.75884495317, 0.75962539022, 0.76092611863, 0.76196670135, 0.76378772112, 0.76404786681, 0.76482830385, 0.76508844953, 0.76586888658, 0.76612903226, 0.7669094693, 0.76899063476, 0.76925078044, 0.77003121748, 0.77211238293, 0.77237252862, 0.77315296566, 0.77419354839, 0.77731529657, 0.77757544225, 0.77861602497, 0.7796566077, 0.78043704475, 0.78173777315, 0.78381893861, 0.78563995838, 0.7866805411, 0.78772112383, 0.78798126951, 0.78902185224, 0.78980228928, 0.79006243496, 0.79110301769, 0.79188345474, 0.79318418314, 0.79422476587, 0.79604578564, 0.79630593132, 0.79708636837, 0.79734651405, 0.79812695109, 0.79838709677, 0.79916753382, 0.80124869927, 0.80150884495, 0.802289282, 0.80437044745, 0.80463059313, 0.80541103018, 0.8064516129, 0.80957336108, 0.80983350676, 0.81087408949, 0.81191467222, 0.81269510926, 0.81399583767, 0.81607700312, 0.81789802289, 0.81893860562, 0.81997918835, 0.82023933403, 0.82127991675, 0.8220603538, 0.82232049948, 0.82336108221, 0.82414151925, 
0.82544224766, 0.82648283039, 0.82830385016, 0.82856399584, 0.82934443288, 0.82960457856, 0.83038501561, 0.83064516129, 0.83142559834, 0.83350676379, 0.83376690947, 0.83454734651, 0.83662851197, 0.83688865765, 0.83766909469, 0.83870967742, 0.8418314256, 0.84209157128, 0.84313215401, 0.84417273673, 0.84495317378, 0.84625390219, 0.84833506764, 0.85015608741, 0.85119667014, 0.85223725286, 0.85249739854, 0.85353798127, 0.85431841831, 0.854578564, 0.85561914672, 0.85639958377, 0.85770031218, 0.8587408949, 0.86056191467, 0.86082206035, 0.8616024974, 0.86186264308, 0.86264308013, 0.86290322581, 0.86368366285, 0.8657648283, 0.86602497399, 0.86680541103, 0.86888657648, 0.86914672216, 0.86992715921, 0.87096774194, 0.87408949011, 0.8743496358, 0.87539021852, 0.87643080125, 0.87721123829, 0.8785119667, 0.88059313215, 0.88241415193, 0.88345473465, 0.88449531738, 0.88475546306, 0.88579604579, 0.88657648283, 0.88683662851, 0.88787721124, 0.88865764828, 0.88995837669, 0.89099895942, 0.89281997919, 0.89308012487, 0.89386056192, 0.8941207076, 0.89490114464, 0.89516129032, 0.89594172737, 0.89802289282, 0.8982830385, 0.89906347555, 0.901144641, 0.90140478668, 0.90218522373, 0.90322580645, 0.90634755463, 0.90660770031, 0.90764828304, 0.90868886577, 0.90946930281, 0.91077003122, 0.91285119667, 0.91467221644, 0.91571279917, 0.91675338189, 0.91701352758, 0.9180541103, 0.91883454735, 0.91909469303, 0.92013527575, 0.9209157128, 0.92221644121, 0.92325702393, 0.9250780437, 0.92533818939, 0.92611862643, 0.92637877211, 0.92715920916, 0.92741935484, 0.92819979188, 0.93028095734, 0.93054110302, 0.93132154006, 0.93340270552, 0.9336628512, 0.93444328824, 0.93548387097, 0.93860561915, 0.93886576483, 0.93990634756, 0.94094693028, 0.94172736733, 0.94302809573, 0.94510926119, 0.94693028096, 0.94797086368, 0.94901144641, 0.94927159209, 0.95031217482, 0.95109261186, 0.95135275754, 0.95239334027, 0.95317377732, 0.95447450572, 0.95551508845, 0.95733610822, 0.9575962539, 0.95837669095, 0.95863683663, 
0.95941727367, 0.95967741936, 0.9604578564, 0.96253902185, 0.96279916753, 0.96357960458, 0.96566077003, 0.96592091571, 0.96670135276, 0.96774193548, 0.97086368366, 0.97112382934, 0.97216441207, 0.9732049948, 0.97398543184, 0.97528616025, 0.9773673257, 0.97918834547, 0.9802289282, 0.98126951093, 0.98152965661, 0.98257023933, 0.98335067638, 0.98361082206, 0.98465140479, 0.98543184183, 0.98673257024, 0.98777315297, 0.98959417274, 0.98985431842, 0.99063475546, 0.99089490115, 0.99167533819, 0.99193548387, 0.99271592092, 0.99479708637, 0.99505723205, 0.9958376691, 0.99791883455, 0.99817898023, 0.99895941727]
pattern_odd=[0.0, 0.0031217482, 0.0033818939, 0.0044224766, 0.0054630593, 0.0062434964, 0.0075442248, 0.0096253902, 0.01144641, 0.0124869927, 0.0135275754, 0.0137877211, 0.0148283039, 0.0156087409, 0.0158688866, 0.0169094693, 0.0176899063, 0.0189906348, 0.0200312175, 0.0218522373, 0.0221123829, 0.02289282, 0.0231529657, 0.0239334027, 0.0241935484, 0.0249739854, 0.0270551509, 0.0273152966, 0.0280957336, 0.0301768991, 0.0304370447, 0.0312174818, 0.0322580645, 0.0353798127, 0.0356399584, 0.0366805411, 0.0377211238, 0.0385015609, 0.0398022893, 0.0418834547, 0.0437044745, 0.0447450572, 0.04578564, 0.0460457856, 0.0470863684, 0.0478668054, 0.0481269511, 0.0491675338, 0.0499479709, 0.0512486993, 0.052289282, 0.0541103018, 0.0543704475, 0.0551508845, 0.0554110302, 0.0561914672, 0.0564516129, 0.0572320499, 0.0593132154, 0.0595733611, 0.0603537981, 0.0624349636, 0.0626951093, 0.0634755463, 0.064516129, 0.0676378772, 0.0678980229, 0.0689386056, 0.0699791883, 0.0707596254, 0.0720603538, 0.0741415193, 0.075962539, 0.0770031217, 0.0780437045, 0.0783038502, 0.0793444329, 0.0801248699, 0.0803850156, 0.0814255983, 0.0822060354, 0.0835067638, 0.0845473465, 0.0863683663, 0.086628512, 0.087408949, 0.0876690947, 0.0884495317, 0.0887096774, 0.0894901145, 0.0915712799, 0.0918314256, 0.0926118626, 0.0946930281, 0.0949531738, 0.0957336108, 0.0967741935, 0.0998959417, 0.1001560874, 0.1011966701, 0.1022372529, 0.1030176899, 0.1043184183, 0.1063995838, 0.1082206035, 0.1092611863, 0.110301769, 0.1105619147, 0.1116024974, 0.1123829344, 0.1126430801, 0.1136836629, 0.1144640999, 0.1157648283, 0.116805411, 0.1186264308, 0.1188865765, 0.1196670135, 0.1199271592, 0.1207075963, 0.1209677419, 0.121748179, 0.1238293444, 0.1240894901, 0.1248699272, 0.1269510926, 0.1272112383, 0.1279916753, 0.1290322581, 0.1321540062, 0.1324141519, 0.1334547347, 0.1344953174, 0.1352757544, 0.1365764828, 0.1386576483, 0.1404786681, 0.1415192508, 0.1425598335, 0.1428199792, 0.1438605619, 0.144640999, 0.1449011446, 
0.1459417274, 0.1467221644, 0.1480228928, 0.1490634755, 0.1508844953, 0.151144641, 0.151925078, 0.1521852237, 0.1529656608, 0.1532258065, 0.1540062435, 0.1560874089, 0.1563475546, 0.1571279917, 0.1592091571, 0.1594693028, 0.1602497399, 0.1612903226, 0.1644120708, 0.1646722164, 0.1657127992, 0.1667533819, 0.1675338189, 0.1688345473, 0.1709157128, 0.1727367326, 0.1737773153, 0.174817898, 0.1750780437, 0.1761186264, 0.1768990635, 0.1771592092, 0.1781997919, 0.1789802289, 0.1802809573, 0.1813215401, 0.1831425598, 0.1834027055, 0.1841831426, 0.1844432882, 0.1852237253, 0.185483871, 0.186264308, 0.1883454735, 0.1886056191, 0.1893860562, 0.1914672216, 0.1917273673, 0.1925078044, 0.1935483871, 0.1966701353, 0.196930281, 0.1979708637, 0.1990114464, 0.1997918835, 0.2010926119, 0.2031737773, 0.2049947971, 0.2060353798, 0.2070759625, 0.2073361082, 0.2083766909, 0.209157128, 0.2094172737, 0.2104578564, 0.2112382934, 0.2125390219, 0.2135796046, 0.2154006244, 0.21566077, 0.2164412071, 0.2167013528, 0.2174817898, 0.2177419355, 0.2185223725, 0.220603538, 0.2208636837, 0.2216441207, 0.2237252862, 0.2239854318, 0.2247658689, 0.2258064516, 0.2289281998, 0.2291883455, 0.2302289282, 0.2312695109, 0.232049948, 0.2333506764, 0.2354318418, 0.2372528616, 0.2382934443, 0.2393340271, 0.2395941727, 0.2406347555, 0.2414151925, 0.2416753382, 0.2427159209, 0.243496358, 0.2447970864, 0.2458376691, 0.2476586889, 0.2479188345, 0.2486992716, 0.2489594173, 0.2497398543, 0.25, 0.250780437, 0.2528616025, 0.2531217482, 0.2539021852, 0.2559833507, 0.2562434964, 0.2570239334, 0.2580645161, 0.2611862643, 0.26144641, 0.2624869927, 0.2635275754, 0.2643080125, 0.2656087409, 0.2676899063, 0.2695109261, 0.2705515088, 0.2715920916, 0.2718522373, 0.27289282, 0.273673257, 0.2739334027, 0.2749739854, 0.2757544225, 0.2770551509, 0.2780957336, 0.2799167534, 0.2801768991, 0.2809573361, 0.2812174818, 0.2819979188, 0.2822580645, 0.2830385016, 0.285119667, 0.2853798127, 0.2861602497, 0.2882414152, 0.2885015609, 
0.2892819979, 0.2903225806, 0.2934443288, 0.2937044745, 0.2947450572, 0.29578564, 0.296566077, 0.2978668054, 0.2999479709, 0.3017689906, 0.3028095734, 0.3038501561, 0.3041103018, 0.3051508845, 0.3059313215, 0.3061914672, 0.3072320499, 0.308012487, 0.3093132154, 0.3103537981, 0.3121748179, 0.3124349636, 0.3132154006, 0.3134755463, 0.3142559834, 0.314516129, 0.3152965661, 0.3173777315, 0.3176378772, 0.3184183143, 0.3204994797, 0.3207596254, 0.3215400624, 0.3225806452, 0.3257023933, 0.325962539, 0.3270031217, 0.3280437045, 0.3288241415, 0.3301248699, 0.3322060354, 0.3340270552, 0.3350676379, 0.3361082206, 0.3363683663, 0.337408949, 0.3381893861, 0.3384495317, 0.3394901145, 0.3402705515, 0.3415712799, 0.3426118626, 0.3444328824, 0.3446930281, 0.3454734651, 0.3457336108, 0.3465140479, 0.3467741935, 0.3475546306, 0.349635796, 0.3498959417, 0.3506763788, 0.3527575442, 0.3530176899, 0.353798127, 0.3548387097, 0.3579604579, 0.3582206035, 0.3592611863, 0.360301769, 0.361082206, 0.3623829344, 0.3644640999, 0.3662851197, 0.3673257024, 0.3683662851, 0.3686264308, 0.3696670135, 0.3704474506, 0.3707075963, 0.371748179, 0.372528616, 0.3738293444, 0.3748699272, 0.3766909469, 0.3769510926, 0.3777315297, 0.3779916753, 0.3787721124, 0.3790322581, 0.3798126951, 0.3818938606, 0.3821540062, 0.3829344433, 0.3850156087, 0.3852757544, 0.3860561915, 0.3870967742, 0.3902185224, 0.3904786681, 0.3915192508, 0.3925598335, 0.3933402706, 0.394640999, 0.3967221644, 0.3985431842, 0.3995837669, 0.4006243496, 0.4008844953, 0.401925078, 0.4027055151, 0.4029656608, 0.4040062435, 0.4047866805, 0.4060874089, 0.4071279917, 0.4089490114, 0.4092091571, 0.4099895942, 0.4102497399, 0.4110301769, 0.4112903226, 0.4120707596, 0.4141519251, 0.4144120708, 0.4151925078, 0.4172736733, 0.4175338189, 0.418314256, 0.4193548387, 0.4224765869, 0.4227367326, 0.4237773153, 0.424817898, 0.4255983351, 0.4268990635, 0.4289802289, 0.4308012487, 0.4318418314, 0.4328824142, 0.4331425598, 0.4341831426, 0.4349635796, 0.4352237253, 
0.436264308, 0.4370447451, 0.4383454735, 0.4393860562, 0.441207076, 0.4414672216, 0.4422476587, 0.4425078044, 0.4432882414, 0.4435483871, 0.4443288241, 0.4464099896, 0.4466701353, 0.4474505723, 0.4495317378, 0.4497918835, 0.4505723205, 0.4516129032, 0.4547346514, 0.4549947971, 0.4560353798, 0.4570759625, 0.4578563996, 0.459157128, 0.4612382934, 0.4630593132, 0.4640998959, 0.4651404787, 0.4654006244, 0.4664412071, 0.4672216441, 0.4674817898, 0.4685223725, 0.4693028096, 0.470603538, 0.4716441207, 0.4734651405, 0.4737252862, 0.4745057232, 0.4747658689, 0.4755463059, 0.4758064516, 0.4765868887, 0.4786680541, 0.4789281998, 0.4797086368, 0.4817898023, 0.482049948, 0.482830385, 0.4838709677, 0.4869927159, 0.4872528616, 0.4882934443, 0.4893340271, 0.4901144641, 0.4914151925, 0.493496358, 0.4953173777, 0.4963579605, 0.4973985432, 0.4976586889, 0.4986992716, 0.4994797086, 0.4997398543, 0.500780437, 0.5015608741, 0.5028616025, 0.5039021852, 0.505723205, 0.5059833507, 0.5067637877, 0.5070239334, 0.5078043704, 0.5080645161, 0.5088449532, 0.5109261186, 0.5111862643, 0.5119667014, 0.5140478668, 0.5143080125, 0.5150884495, 0.5161290323, 0.5192507804, 0.5195109261, 0.5205515088, 0.5215920916, 0.5223725286, 0.523673257, 0.5257544225, 0.5275754422, 0.528616025, 0.5296566077, 0.5299167534, 0.5309573361, 0.5317377732, 0.5319979188, 0.5330385016, 0.5338189386, 0.535119667, 0.5361602497, 0.5379812695, 0.5382414152, 0.5390218522, 0.5392819979, 0.540062435, 0.5403225806, 0.5411030177, 0.5431841831, 0.5434443288, 0.5442247659, 0.5463059313, 0.546566077, 0.547346514, 0.5483870968, 0.551508845, 0.5517689906, 0.5528095734, 0.5538501561, 0.5546305931, 0.5559313215, 0.558012487, 0.5598335068, 0.5608740895, 0.5619146722, 0.5621748179, 0.5632154006, 0.5639958377, 0.5642559834, 0.5652965661, 0.5660770031, 0.5673777315, 0.5684183143, 0.570239334, 0.5704994797, 0.5712799168, 0.5715400624, 0.5723204995, 0.5725806452, 0.5733610822, 0.5754422477, 0.5757023933, 0.5764828304, 0.5785639958, 0.5788241415, 
0.5796045786, 0.5806451613, 0.5837669095, 0.5840270552, 0.5850676379, 0.5861082206, 0.5868886576, 0.5881893861, 0.5902705515, 0.5920915713, 0.593132154, 0.5941727367, 0.5944328824, 0.5954734651, 0.5962539022, 0.5965140479, 0.5975546306, 0.5983350676, 0.599635796, 0.6006763788, 0.6024973985, 0.6027575442, 0.6035379813, 0.603798127, 0.604578564, 0.6048387097, 0.6056191467, 0.6077003122, 0.6079604579, 0.6087408949, 0.6108220604, 0.611082206, 0.6118626431, 0.6129032258, 0.616024974, 0.6162851197, 0.6173257024, 0.6183662851, 0.6191467222, 0.6204474506, 0.622528616, 0.6243496358, 0.6253902185, 0.6264308012, 0.6266909469, 0.6277315297, 0.6285119667, 0.6287721124, 0.6298126951, 0.6305931322, 0.6318938606, 0.6329344433, 0.6347554631, 0.6350156087, 0.6357960458, 0.6360561915, 0.6368366285, 0.6370967742, 0.6378772112, 0.6399583767, 0.6402185224, 0.6409989594, 0.6430801249, 0.6433402706, 0.6441207076, 0.6451612903, 0.6482830385, 0.6485431842, 0.6495837669, 0.6506243496, 0.6514047867, 0.6527055151, 0.6547866805, 0.6566077003, 0.657648283, 0.6586888658, 0.6589490114, 0.6599895942, 0.6607700312, 0.6610301769, 0.6620707596, 0.6628511967, 0.6641519251, 0.6651925078, 0.6670135276, 0.6672736733, 0.6680541103, 0.668314256, 0.669094693, 0.6693548387, 0.6701352758, 0.6722164412, 0.6724765869, 0.6732570239, 0.6753381894, 0.6755983351, 0.6763787721, 0.6774193548, 0.680541103, 0.6808012487, 0.6818418314, 0.6828824142, 0.6836628512, 0.6849635796, 0.6870447451, 0.6888657648, 0.6899063476, 0.6909469303, 0.691207076, 0.6922476587, 0.6930280957, 0.6932882414, 0.6943288241, 0.6951092612, 0.6964099896, 0.6974505723, 0.6992715921, 0.6995317378, 0.7003121748, 0.7005723205, 0.7013527575, 0.7016129032, 0.7023933403, 0.7044745057, 0.7047346514, 0.7055150885, 0.7075962539, 0.7078563996, 0.7086368366, 0.7096774194, 0.7127991675, 0.7130593132, 0.7140998959, 0.7151404787, 0.7159209157, 0.7172216441, 0.7193028096, 0.7211238293, 0.7221644121, 0.7232049948, 0.7234651405, 0.7245057232, 0.7252861603, 
0.7255463059, 0.7265868887, 0.7273673257, 0.7286680541, 0.7297086368, 0.7315296566, 0.7317898023, 0.7325702393, 0.732830385, 0.7336108221, 0.7338709677, 0.7346514048, 0.7367325702, 0.7369927159, 0.737773153, 0.7398543184, 0.7401144641, 0.7408949011, 0.7419354839, 0.7450572321, 0.7453173777, 0.7463579605, 0.7473985432, 0.7481789802, 0.7494797086, 0.7515608741, 0.7533818939, 0.7544224766, 0.7554630593, 0.755723205, 0.7567637877, 0.7575442248, 0.7578043704, 0.7588449532, 0.7596253902, 0.7609261186, 0.7619667014, 0.7637877211, 0.7640478668, 0.7648283039, 0.7650884495, 0.7658688866, 0.7661290323, 0.7669094693, 0.7689906348, 0.7692507804, 0.7700312175, 0.7721123829, 0.7723725286, 0.7731529657, 0.7741935484, 0.7773152966, 0.7775754422, 0.778616025, 0.7796566077, 0.7804370447, 0.7817377732, 0.7838189386, 0.7856399584, 0.7866805411, 0.7877211238, 0.7879812695, 0.7890218522, 0.7898022893, 0.790062435, 0.7911030177, 0.7918834547, 0.7931841831, 0.7942247659, 0.7960457856, 0.7963059313, 0.7970863684, 0.797346514, 0.7981269511, 0.7983870968, 0.7991675338, 0.8012486993, 0.801508845, 0.802289282, 0.8043704475, 0.8046305931, 0.8054110302, 0.8064516129, 0.8095733611, 0.8098335068, 0.8108740895, 0.8119146722, 0.8126951093, 0.8139958377, 0.8160770031, 0.8178980229, 0.8189386056, 0.8199791883, 0.820239334, 0.8212799168, 0.8220603538, 0.8223204995, 0.8233610822, 0.8241415193, 0.8254422477, 0.8264828304, 0.8283038502, 0.8285639958, 0.8293444329, 0.8296045786, 0.8303850156, 0.8306451613, 0.8314255983, 0.8335067638, 0.8337669095, 0.8345473465, 0.836628512, 0.8368886576, 0.8376690947, 0.8387096774, 0.8418314256, 0.8420915713, 0.843132154, 0.8441727367, 0.8449531738, 0.8462539022, 0.8483350676, 0.8501560874, 0.8511966701, 0.8522372529, 0.8524973985, 0.8535379813, 0.8543184183, 0.854578564, 0.8556191467, 0.8563995838, 0.8577003122, 0.8587408949, 0.8605619147, 0.8608220604, 0.8616024974, 0.8618626431, 0.8626430801, 0.8629032258, 0.8636836629, 0.8657648283, 0.866024974, 0.866805411, 
0.8688865765, 0.8691467222, 0.8699271592, 0.8709677419, 0.8740894901, 0.8743496358, 0.8753902185, 0.8764308012, 0.8772112383, 0.8785119667, 0.8805931322, 0.8824141519, 0.8834547347, 0.8844953174, 0.8847554631, 0.8857960458, 0.8865764828, 0.8868366285, 0.8878772112, 0.8886576483, 0.8899583767, 0.8909989594, 0.8928199792, 0.8930801249, 0.8938605619, 0.8941207076, 0.8949011446, 0.8951612903, 0.8959417274, 0.8980228928, 0.8982830385, 0.8990634755, 0.901144641, 0.9014047867, 0.9021852237, 0.9032258065, 0.9063475546, 0.9066077003, 0.907648283, 0.9086888658, 0.9094693028, 0.9107700312, 0.9128511967, 0.9146722164, 0.9157127992, 0.9167533819, 0.9170135276, 0.9180541103, 0.9188345473, 0.919094693, 0.9201352758, 0.9209157128, 0.9222164412, 0.9232570239, 0.9250780437, 0.9253381894, 0.9261186264, 0.9263787721, 0.9271592092, 0.9274193548, 0.9281997919, 0.9302809573, 0.930541103, 0.9313215401, 0.9334027055, 0.9336628512, 0.9344432882, 0.935483871, 0.9386056191, 0.9388657648, 0.9399063476, 0.9409469303, 0.9417273673, 0.9430280957, 0.9451092612, 0.946930281, 0.9479708637, 0.9490114464, 0.9492715921, 0.9503121748, 0.9510926119, 0.9513527575, 0.9523933403, 0.9531737773, 0.9544745057, 0.9555150885, 0.9573361082, 0.9575962539, 0.9583766909, 0.9586368366, 0.9594172737, 0.9596774194, 0.9604578564, 0.9625390219, 0.9627991675, 0.9635796046, 0.96566077, 0.9659209157, 0.9667013528, 0.9677419355, 0.9708636837, 0.9711238293, 0.9721644121, 0.9732049948, 0.9739854318, 0.9752861603, 0.9773673257, 0.9791883455, 0.9802289282, 0.9812695109, 0.9815296566, 0.9825702393, 0.9833506764, 0.9836108221, 0.9846514048, 0.9854318418, 0.9867325702, 0.987773153, 0.9895941727, 0.9898543184, 0.9906347555, 0.9908949011, 0.9916753382, 0.9919354839, 0.9927159209, 0.9947970864, 0.9950572321, 0.9958376691, 0.9979188345, 0.9981789802, 0.9989594173]
pattern_even=[0.0, 0.0031217482, 0.0033818939, 0.0044224766, 0.0054630593, 0.0062434964, 0.0075442248, 0.0096253902, 0.01144641, 0.0124869927, 0.0135275754, 0.0137877211, 0.0148283039, 0.0156087409, 0.0158688866, 0.0169094693, 0.0176899063, 0.0189906348, 0.0200312175, 0.0218522373, 0.0221123829, 0.02289282, 0.0231529657, 0.0239334027, 0.0241935484, 0.0249739854, 0.0270551509, 0.0273152966, 0.0280957336, 0.0301768991, 0.0304370447, 0.0312174818, 0.0322580645, 0.0353798127, 0.0356399584, 0.0366805411, 0.0377211238, 0.0385015609, 0.0398022893, 0.0418834547, 0.0437044745, 0.0447450572, 0.04578564, 0.0460457856, 0.0470863684, 0.0478668054, 0.0481269511, 0.0491675338, 0.0499479709, 0.0512486993, 0.052289282, 0.0541103018, 0.0543704475, 0.0551508845, 0.0554110302, 0.0561914672, 0.0564516129, 0.0572320499, 0.0593132154, 0.0595733611, 0.0603537981, 0.0624349636, 0.0626951093, 0.0634755463, 0.064516129, 0.0676378772, 0.0678980229, 0.0689386056, 0.0699791883, 0.0707596254, 0.0720603538, 0.0741415193, 0.075962539, 0.0770031217, 0.0780437045, 0.0783038502, 0.0793444329, 0.0801248699, 0.0803850156, 0.0814255983, 0.0822060354, 0.0835067638, 0.0845473465, 0.0863683663, 0.086628512, 0.087408949, 0.0876690947, 0.0884495317, 0.0887096774, 0.0894901145, 0.0915712799, 0.0918314256, 0.0926118626, 0.0946930281, 0.0949531738, 0.0957336108, 0.0967741935, 0.0998959417, 0.1001560874, 0.1011966701, 0.1022372529, 0.1030176899, 0.1043184183, 0.1063995838, 0.1082206035, 0.1092611863, 0.110301769, 0.1105619147, 0.1116024974, 0.1123829344, 0.1126430801, 0.1136836629, 0.1144640999, 0.1157648283, 0.116805411, 0.1186264308, 0.1188865765, 0.1196670135, 0.1199271592, 0.1207075963, 0.1209677419, 0.121748179, 0.1238293444, 0.1240894901, 0.1248699272, 0.1269510926, 0.1272112383, 0.1279916753, 0.1290322581, 0.1321540062, 0.1324141519, 0.1334547347, 0.1344953174, 0.1352757544, 0.1365764828, 0.1386576483, 0.1404786681, 0.1415192508, 0.1425598335, 0.1428199792, 0.1438605619, 0.144640999, 0.1449011446, 
0.1459417274, 0.1467221644, 0.1480228928, 0.1490634755, 0.1508844953, 0.151144641, 0.151925078, 0.1521852237, 0.1529656608, 0.1532258065, 0.1540062435, 0.1560874089, 0.1563475546, 0.1571279917, 0.1592091571, 0.1594693028, 0.1602497399, 0.1612903226, 0.1644120708, 0.1646722164, 0.1657127992, 0.1667533819, 0.1675338189, 0.1688345473, 0.1709157128, 0.1727367326, 0.1737773153, 0.174817898, 0.1750780437, 0.1761186264, 0.1768990635, 0.1771592092, 0.1781997919, 0.1789802289, 0.1802809573, 0.1813215401, 0.1831425598, 0.1834027055, 0.1841831426, 0.1844432882, 0.1852237253, 0.185483871, 0.186264308, 0.1883454735, 0.1886056191, 0.1893860562, 0.1914672216, 0.1917273673, 0.1925078044, 0.1935483871, 0.1966701353, 0.196930281, 0.1979708637, 0.1990114464, 0.1997918835, 0.2010926119, 0.2031737773, 0.2049947971, 0.2060353798, 0.2070759625, 0.2073361082, 0.2083766909, 0.209157128, 0.2094172737, 0.2104578564, 0.2112382934, 0.2125390219, 0.2135796046, 0.2154006244, 0.21566077, 0.2164412071, 0.2167013528, 0.2174817898, 0.2177419355, 0.2185223725, 0.220603538, 0.2208636837, 0.2216441207, 0.2237252862, 0.2239854318, 0.2247658689, 0.2258064516, 0.2289281998, 0.2291883455, 0.2302289282, 0.2312695109, 0.232049948, 0.2333506764, 0.2354318418, 0.2372528616, 0.2382934443, 0.2393340271, 0.2395941727, 0.2406347555, 0.2414151925, 0.2416753382, 0.2427159209, 0.243496358, 0.2447970864, 0.2458376691, 0.2476586889, 0.2479188345, 0.2486992716, 0.2489594173, 0.2497398543, 0.25, 0.250780437, 0.2528616025, 0.2531217482, 0.2539021852, 0.2559833507, 0.2562434964, 0.2570239334, 0.2580645161, 0.2611862643, 0.26144641, 0.2624869927, 0.2635275754, 0.2643080125, 0.2656087409, 0.2676899063, 0.2695109261, 0.2705515088, 0.2715920916, 0.2718522373, 0.27289282, 0.273673257, 0.2739334027, 0.2749739854, 0.2757544225, 0.2770551509, 0.2780957336, 0.2799167534, 0.2801768991, 0.2809573361, 0.2812174818, 0.2819979188, 0.2822580645, 0.2830385016, 0.285119667, 0.2853798127, 0.2861602497, 0.2882414152, 0.2885015609, 
0.2892819979, 0.2903225806, 0.2934443288, 0.2937044745, 0.2947450572, 0.29578564, 0.296566077, 0.2978668054, 0.2999479709, 0.3017689906, 0.3028095734, 0.3038501561, 0.3041103018, 0.3051508845, 0.3059313215, 0.3061914672, 0.3072320499, 0.308012487, 0.3093132154, 0.3103537981, 0.3121748179, 0.3124349636, 0.3132154006, 0.3134755463, 0.3142559834, 0.314516129, 0.3152965661, 0.3173777315, 0.3176378772, 0.3184183143, 0.3204994797, 0.3207596254, 0.3215400624, 0.3225806452, 0.3257023933, 0.325962539, 0.3270031217, 0.3280437045, 0.3288241415, 0.3301248699, 0.3322060354, 0.3340270552, 0.3350676379, 0.3361082206, 0.3363683663, 0.337408949, 0.3381893861, 0.3384495317, 0.3394901145, 0.3402705515, 0.3415712799, 0.3426118626, 0.3444328824, 0.3446930281, 0.3454734651, 0.3457336108, 0.3465140479, 0.3467741935, 0.3475546306, 0.349635796, 0.3498959417, 0.3506763788, 0.3527575442, 0.3530176899, 0.353798127, 0.3548387097, 0.3579604579, 0.3582206035, 0.3592611863, 0.360301769, 0.361082206, 0.3623829344, 0.3644640999, 0.3662851197, 0.3673257024, 0.3683662851, 0.3686264308, 0.3696670135, 0.3704474506, 0.3707075963, 0.371748179, 0.372528616, 0.3738293444, 0.3748699272, 0.3766909469, 0.3769510926, 0.3777315297, 0.3779916753, 0.3787721124, 0.3790322581, 0.3798126951, 0.3818938606, 0.3821540062, 0.3829344433, 0.3850156087, 0.3852757544, 0.3860561915, 0.3870967742, 0.3902185224, 0.3904786681, 0.3915192508, 0.3925598335, 0.3933402706, 0.394640999, 0.3967221644, 0.3985431842, 0.3995837669, 0.4006243496, 0.4008844953, 0.401925078, 0.4027055151, 0.4029656608, 0.4040062435, 0.4047866805, 0.4060874089, 0.4071279917, 0.4089490114, 0.4092091571, 0.4099895942, 0.4102497399, 0.4110301769, 0.4112903226, 0.4120707596, 0.4141519251, 0.4144120708, 0.4151925078, 0.4172736733, 0.4175338189, 0.418314256, 0.4193548387, 0.4224765869, 0.4227367326, 0.4237773153, 0.424817898, 0.4255983351, 0.4268990635, 0.4289802289, 0.4308012487, 0.4318418314, 0.4328824142, 0.4331425598, 0.4341831426, 0.4349635796, 0.4352237253, 
0.436264308, 0.4370447451, 0.4383454735, 0.4393860562, 0.441207076, 0.4414672216, 0.4422476587, 0.4425078044, 0.4432882414, 0.4435483871, 0.4443288241, 0.4464099896, 0.4466701353, 0.4474505723, 0.4495317378, 0.4497918835, 0.4505723205, 0.4516129032, 0.4547346514, 0.4549947971, 0.4560353798, 0.4570759625, 0.4578563996, 0.459157128, 0.4612382934, 0.4630593132, 0.4640998959, 0.4651404787, 0.4654006244, 0.4664412071, 0.4672216441, 0.4674817898, 0.4685223725, 0.4693028096, 0.470603538, 0.4716441207, 0.4734651405, 0.4737252862, 0.4745057232, 0.4747658689, 0.4755463059, 0.4758064516, 0.4765868887, 0.4786680541, 0.4789281998, 0.4797086368, 0.4817898023, 0.482049948, 0.482830385, 0.4838709677, 0.4869927159, 0.4872528616, 0.4882934443, 0.4893340271, 0.4901144641, 0.4914151925, 0.493496358, 0.4953173777, 0.4963579605, 0.4973985432, 0.4976586889, 0.4986992716, 0.4994797086, 0.4997398543, 0.500780437, 0.5015608741, 0.5028616025, 0.5039021852, 0.505723205, 0.5059833507, 0.5067637877, 0.5070239334, 0.5078043704, 0.5080645161, 0.5088449532, 0.5109261186, 0.5111862643, 0.5119667014, 0.5140478668, 0.5143080125, 0.5150884495, 0.5161290323, 0.5192507804, 0.5195109261, 0.5205515088, 0.5215920916, 0.5223725286, 0.523673257, 0.5257544225, 0.5275754422, 0.528616025, 0.5296566077, 0.5299167534, 0.5309573361, 0.5317377732, 0.5319979188, 0.5330385016, 0.5338189386, 0.535119667, 0.5361602497, 0.5379812695, 0.5382414152, 0.5390218522, 0.5392819979, 0.540062435, 0.5403225806, 0.5411030177, 0.5431841831, 0.5434443288, 0.5442247659, 0.5463059313, 0.546566077, 0.547346514, 0.5483870968, 0.551508845, 0.5517689906, 0.5528095734, 0.5538501561, 0.5546305931, 0.5559313215, 0.558012487, 0.5598335068, 0.5608740895, 0.5619146722, 0.5621748179, 0.5632154006, 0.5639958377, 0.5642559834, 0.5652965661, 0.5660770031, 0.5673777315, 0.5684183143, 0.570239334, 0.5704994797, 0.5712799168, 0.5715400624, 0.5723204995, 0.5725806452, 0.5733610822, 0.5754422477, 0.5757023933, 0.5764828304, 0.5785639958, 0.5788241415, 
0.5796045786, 0.5806451613, 0.5837669095, 0.5840270552, 0.5850676379, 0.5861082206, 0.5868886576, 0.5881893861, 0.5902705515, 0.5920915713, 0.593132154, 0.5941727367, 0.5944328824, 0.5954734651, 0.5962539022, 0.5965140479, 0.5975546306, 0.5983350676, 0.599635796, 0.6006763788, 0.6024973985, 0.6027575442, 0.6035379813, 0.603798127, 0.604578564, 0.6048387097, 0.6056191467, 0.6077003122, 0.6079604579, 0.6087408949, 0.6108220604, 0.611082206, 0.6118626431, 0.6129032258, 0.616024974, 0.6162851197, 0.6173257024, 0.6183662851, 0.6191467222, 0.6204474506, 0.622528616, 0.6243496358, 0.6253902185, 0.6264308012, 0.6266909469, 0.6277315297, 0.6285119667, 0.6287721124, 0.6298126951, 0.6305931322, 0.6318938606, 0.6329344433, 0.6347554631, 0.6350156087, 0.6357960458, 0.6360561915, 0.6368366285, 0.6370967742, 0.6378772112, 0.6399583767, 0.6402185224, 0.6409989594, 0.6430801249, 0.6433402706, 0.6441207076, 0.6451612903, 0.6482830385, 0.6485431842, 0.6495837669, 0.6506243496, 0.6514047867, 0.6527055151, 0.6547866805, 0.6566077003, 0.657648283, 0.6586888658, 0.6589490114, 0.6599895942, 0.6607700312, 0.6610301769, 0.6620707596, 0.6628511967, 0.6641519251, 0.6651925078, 0.6670135276, 0.6672736733, 0.6680541103, 0.668314256, 0.669094693, 0.6693548387, 0.6701352758, 0.6722164412, 0.6724765869, 0.6732570239, 0.6753381894, 0.6755983351, 0.6763787721, 0.6774193548, 0.680541103, 0.6808012487, 0.6818418314, 0.6828824142, 0.6836628512, 0.6849635796, 0.6870447451, 0.6888657648, 0.6899063476, 0.6909469303, 0.691207076, 0.6922476587, 0.6930280957, 0.6932882414, 0.6943288241, 0.6951092612, 0.6964099896, 0.6974505723, 0.6992715921, 0.6995317378, 0.7003121748, 0.7005723205, 0.7013527575, 0.7016129032, 0.7023933403, 0.7044745057, 0.7047346514, 0.7055150885, 0.7075962539, 0.7078563996, 0.7086368366, 0.7096774194, 0.7127991675, 0.7130593132, 0.7140998959, 0.7151404787, 0.7159209157, 0.7172216441, 0.7193028096, 0.7211238293, 0.7221644121, 0.7232049948, 0.7234651405, 0.7245057232, 0.7252861603, 
0.7255463059, 0.7265868887, 0.7273673257, 0.7286680541, 0.7297086368, 0.7315296566, 0.7317898023, 0.7325702393, 0.732830385, 0.7336108221, 0.7338709677, 0.7346514048, 0.7367325702, 0.7369927159, 0.737773153, 0.7398543184, 0.7401144641, 0.7408949011, 0.7419354839, 0.7450572321, 0.7453173777, 0.7463579605, 0.7473985432, 0.7481789802, 0.7494797086, 0.7515608741, 0.7533818939, 0.7544224766, 0.7554630593, 0.755723205, 0.7567637877, 0.7575442248, 0.7578043704, 0.7588449532, 0.7596253902, 0.7609261186, 0.7619667014, 0.7637877211, 0.7640478668, 0.7648283039, 0.7650884495, 0.7658688866, 0.7661290323, 0.7669094693, 0.7689906348, 0.7692507804, 0.7700312175, 0.7721123829, 0.7723725286, 0.7731529657, 0.7741935484, 0.7773152966, 0.7775754422, 0.778616025, 0.7796566077, 0.7804370447, 0.7817377732, 0.7838189386, 0.7856399584, 0.7866805411, 0.7877211238, 0.7879812695, 0.7890218522, 0.7898022893, 0.790062435, 0.7911030177, 0.7918834547, 0.7931841831, 0.7942247659, 0.7960457856, 0.7963059313, 0.7970863684, 0.797346514, 0.7981269511, 0.7983870968, 0.7991675338, 0.8012486993, 0.801508845, 0.802289282, 0.8043704475, 0.8046305931, 0.8054110302, 0.8064516129, 0.8095733611, 0.8098335068, 0.8108740895, 0.8119146722, 0.8126951093, 0.8139958377, 0.8160770031, 0.8178980229, 0.8189386056, 0.8199791883, 0.820239334, 0.8212799168, 0.8220603538, 0.8223204995, 0.8233610822, 0.8241415193, 0.8254422477, 0.8264828304, 0.8283038502, 0.8285639958, 0.8293444329, 0.8296045786, 0.8303850156, 0.8306451613, 0.8314255983, 0.8335067638, 0.8337669095, 0.8345473465, 0.836628512, 0.8368886576, 0.8376690947, 0.8387096774, 0.8418314256, 0.8420915713, 0.843132154, 0.8441727367, 0.8449531738, 0.8462539022, 0.8483350676, 0.8501560874, 0.8511966701, 0.8522372529, 0.8524973985, 0.8535379813, 0.8543184183, 0.854578564, 0.8556191467, 0.8563995838, 0.8577003122, 0.8587408949, 0.8605619147, 0.8608220604, 0.8616024974, 0.8618626431, 0.8626430801, 0.8629032258, 0.8636836629, 0.8657648283, 0.866024974, 0.866805411, 
0.8688865765, 0.8691467222, 0.8699271592, 0.8709677419, 0.8740894901, 0.8743496358, 0.8753902185, 0.8764308012, 0.8772112383, 0.8785119667, 0.8805931322, 0.8824141519, 0.8834547347, 0.8844953174, 0.8847554631, 0.8857960458, 0.8865764828, 0.8868366285, 0.8878772112, 0.8886576483, 0.8899583767, 0.8909989594, 0.8928199792, 0.8930801249, 0.8938605619, 0.8941207076, 0.8949011446, 0.8951612903, 0.8959417274, 0.8980228928, 0.8982830385, 0.8990634755, 0.901144641, 0.9014047867, 0.9021852237, 0.9032258065, 0.9063475546, 0.9066077003, 0.907648283, 0.9086888658, 0.9094693028, 0.9107700312, 0.9128511967, 0.9146722164, 0.9157127992, 0.9167533819, 0.9170135276, 0.9180541103, 0.9188345473, 0.919094693, 0.9201352758, 0.9209157128, 0.9222164412, 0.9232570239, 0.9250780437, 0.9253381894, 0.9261186264, 0.9263787721, 0.9271592092, 0.9274193548, 0.9281997919, 0.9302809573, 0.930541103, 0.9313215401, 0.9334027055, 0.9336628512, 0.9344432882, 0.935483871, 0.9386056191, 0.9388657648, 0.9399063476, 0.9409469303, 0.9417273673, 0.9430280957, 0.9451092612, 0.946930281, 0.9479708637, 0.9490114464, 0.9492715921, 0.9503121748, 0.9510926119, 0.9513527575, 0.9523933403, 0.9531737773, 0.9544745057, 0.9555150885, 0.9573361082, 0.9575962539, 0.9583766909, 0.9586368366, 0.9594172737, 0.9596774194, 0.9604578564, 0.9625390219, 0.9627991675, 0.9635796046, 0.96566077, 0.9659209157, 0.9667013528, 0.9677419355, 0.9708636837, 0.9711238293, 0.9721644121, 0.9732049948, 0.9739854318, 0.9752861603, 0.9773673257, 0.9791883455, 0.9802289282, 0.9812695109, 0.9815296566, 0.9825702393, 0.9833506764, 0.9836108221, 0.9846514048, 0.9854318418, 0.9867325702, 0.987773153, 0.9895941727, 0.9898543184, 0.9906347555, 0.9908949011, 0.9916753382, 0.9919354839, 0.9927159209, 0.9947970864, 0.9950572321, 0.9958376691, 0.9979188345, 0.9981789802, 0.9989594173]
averages_even={0.0: [0.0], 0.25: [0.5], 0.0096253902: [0.0806451612903, 0.9193548387097], 0.1688345473: [0.6290322580645, 0.3709677419355], 0.3850156087: [0.258064516129, 0.741935483871], 0.0481269511: [0.0161290322581, 0.9838709677419], 0.3530176899: [0.6612903225806, 0.3387096774194], 0.6433402706: [0.6612903225806, 0.3387096774194], 0.6077003122: [0.1935483870968, 0.8064516129032], 0.3790322581: [0.5], 0.4560353798: [0.7903225806452, 0.2096774193548], 0.8626430801: [0.5161290322581, 0.4838709677419], 0.732830385: [0.5322580645161, 0.4677419354839], 0.3933402706: [0.1612903225806, 0.8387096774194], 0.4328824142: [0.7741935483871, 0.2258064516129], 0.0044224766: [0.7903225806452, 0.2096774193548], 0.482049948: [0.6612903225806, 0.3387096774194], 0.8764308012: [0.2741935483871, 0.7258064516129], 0.7817377732: [0.6290322580645, 0.3709677419355], 0.9274193548: [0.5], 0.3506763788: [0.0645161290323, 0.9354838709677], 0.5434443288: [0.1451612903226, 0.8548387096774], 0.0322580645: [0.0], 0.1675338189: [0.1612903225806, 0.8387096774194], 0.4383454735: [0.6935483870968, 0.3064516129032], 0.0478668054: [0.1290322580645, 0.8709677419355], 0.361082206: [0.1612903225806, 0.8387096774194], 0.7221644121: [0.7096774193548, 0.2903225806452], 0.1925078044: [0.0322580645161, 0.9677419354839], 0.4640998959: [0.7096774193548, 0.2903225806452], 0.232049948: [0.1612903225806, 0.8387096774194], 0.4838709677: [0.0], 0.9635796046: [0.0645161290323, 0.9354838709677], 0.5837669095: [0.6451612903226, 0.3548387096774], 0.0356399584: [0.8870967741935, 0.1129032258065], 0.4193548387: [0.0], 0.96566077: [0.258064516129, 0.741935483871], 0.7960457856: [0.4516129032258, 0.5483870967742], 0.5902705515: [0.0806451612903, 0.9193548387097], 0.5140478668: [0.258064516129, 0.741935483871], 0.6347554631: [0.4516129032258, 0.5483870967742], 0.3051508845: [0.4032258064516, 0.5967741935484], 0.7963059313: [0.758064516129, 0.241935483871], 0.6253902185: [0.7096774193548, 0.2903225806452], 0.0835067638: 
[0.6935483870968, 0.3064516129032], 0.6974505723: [0.5645161290323, 0.4354838709677], 0.4341831426: [0.4032258064516, 0.5967741935484], 0.8043704475: [0.258064516129, 0.741935483871], 0.2010926119: [0.6290322580645, 0.3709677419355], 0.5080645161: [0.5], 0.9721644121: [0.7903225806452, 0.2096774193548], 0.8563995838: [0.4193548387097, 0.5806451612903], 0.0814255983: [0.8225806451613, 0.1774193548387], 0.8285639958: [0.758064516129, 0.241935483871], 0.6620707596: [0.8225806451613, 0.1774193548387], 0.3132154006: [0.9032258064516, 0.0967741935484], 0.2812174818: [0.5322580645161, 0.4677419354839], 0.3902185224: [0.6451612903226, 0.3548387096774], 0.7911030177: [0.8225806451613, 0.1774193548387], 0.7408949011: [0.0322580645161, 0.9677419354839], 0.3072320499: [0.8225806451613, 0.1774193548387], 0.8368886576: [0.6612903225806, 0.3387096774194], 0.5704994797: [0.758064516129, 0.241935483871], 0.4102497399: [0.5322580645161, 0.4677419354839], 0.4872528616: [0.8870967741935, 0.1129032258065], 0.349635796: [0.1935483870968, 0.8064516129032], 0.8012486993: [0.1935483870968, 0.8064516129032], 0.2406347555: [0.4032258064516, 0.5967741935484], 0.9791883455: [0.3870967741935, 0.6129032258065], 0.546566077: [0.6612903225806, 0.3387096774194], 0.1831425598: [0.4516129032258, 0.5483870967742], 0.3662851197: [0.3870967741935, 0.6129032258065], 0.04578564: [0.2258064516129, 0.7741935483871], 0.7140998959: [0.7903225806452, 0.2096774193548], 0.9575962539: [0.758064516129, 0.241935483871], 0.360301769: [0.2741935483871, 0.7258064516129], 0.4693028096: [0.4193548387097, 0.5806451612903], 0.8418314256: [0.6451612903226, 0.3548387096774], 0.075962539: [0.3870967741935, 0.6129032258065], 0.1238293444: [0.1935483870968, 0.8064516129032], 0.2476586889: [0.4516129032258, 0.5483870967742], 0.1771592092: [0.0161290322581, 0.9838709677419], 0.5754422477: [0.1935483870968, 0.8064516129032], 0.7286680541: [0.6935483870968, 0.3064516129032], 0.4893340271: [0.2741935483871, 0.7258064516129], 
0.8618626431: [0.5322580645161, 0.4677419354839], 0.4112903226: [0.5], 0.2416753382: [0.0161290322581, 0.9838709677419], 0.6527055151: [0.6290322580645, 0.3709677419355], 0.4630593132: [0.3870967741935, 0.6129032258065], 0.5119667014: [0.0645161290323, 0.9354838709677], 0.8834547347: [0.7096774193548, 0.2903225806452], 0.0460457856: [0.0483870967742, 0.9516129032258], 0.3683662851: [0.7741935483871, 0.2258064516129], 0.3363683663: [0.0483870967742, 0.9516129032258], 0.9867325702: [0.6935483870968, 0.3064516129032], 0.064516129: [0.0], 0.3623829344: [0.6290322580645, 0.3709677419355], 0.4393860562: [0.5645161290323, 0.4354838709677], 0.8616024974: [0.9032258064516, 0.0967741935484], 0.7044745057: [0.1935483870968, 0.8064516129032], 0.8264828304: [0.5645161290323, 0.4354838709677], 0.1781997919: [0.8225806451613, 0.1774193548387], 0.6441207076: [0.0322580645161, 0.9677419354839], 0.7127991675: [0.6451612903226, 0.3548387096774], 0.3350676379: [0.2903225806452, 0.7096774193548], 0.2167013528: [0.5322580645161, 0.4677419354839], 0.9094693028: [0.1612903225806, 0.8387096774194], 0.1592091571: [0.258064516129, 0.741935483871], 0.0398022893: [0.6290322580645, 0.3709677419355], 0.5954734651: [0.4032258064516, 0.5967741935484], 0.5528095734: [0.7903225806452, 0.2096774193548], 0.3124349636: [0.758064516129, 0.241935483871], 0.5608740895: [0.7096774193548, 0.2903225806452], 0.9947970864: [0.1935483870968, 0.8064516129032], 0.6641519251: [0.6935483870968, 0.3064516129032], 0.4474505723: [0.0645161290323, 0.9354838709677], 0.2237252862: [0.258064516129, 0.741935483871], 0.7003121748: [0.9032258064516, 0.0967741935484], 0.9222164412: [0.6935483870968, 0.3064516129032], 0.4047866805: [0.4193548387097, 0.5806451612903], 0.4414672216: [0.758064516129, 0.241935483871], 0.8189386056: [0.7096774193548, 0.2903225806452], 0.9157127992: [0.7096774193548, 0.2903225806452], 0.2177419355: [0.5], 0.2624869927: [0.7903225806452, 0.2096774193548], 0.5382414152: [0.758064516129, 
0.241935483871], 0.0801248699: [0.1290322580645, 0.8709677419355], 0.2885015609: [0.6612903225806, 0.3387096774194], 0.0156087409: [0.1290322580645, 0.8709677419355], 0.9014047867: [0.6612903225806, 0.3387096774194], 0.0689386056: [0.7903225806452, 0.2096774193548], 0.3915192508: [0.7903225806452, 0.2096774193548], 0.551508845: [0.6451612903226, 0.3548387096774], 0.669094693: [0.5161290322581, 0.4838709677419], 0.8785119667: [0.6290322580645, 0.3709677419355], 0.7211238293: [0.3870967741935, 0.6129032258065], 0.4175338189: [0.6612903225806, 0.3387096774194], 0.1560874089: [0.1935483870968, 0.8064516129032], 0.5881893861: [0.6290322580645, 0.3709677419355], 0.0676378772: [0.6451612903226, 0.3548387096774], 0.7721123829: [0.258064516129, 0.741935483871], 0.7151404787: [0.2741935483871, 0.7258064516129], 0.8337669095: [0.1451612903226, 0.8548387096774], 0.0249739854: [0.3225806451613, 0.6774193548387], 0.5028616025: [0.6935483870968, 0.3064516129032], 0.0998959417: [0.6451612903226, 0.3548387096774], 0.8909989594: [0.5645161290323, 0.4354838709677], 0.2382934443: [0.2903225806452, 0.7096774193548], 0.0595733611: [0.1451612903226, 0.8548387096774], 0.6888657648: [0.3870967741935, 0.6129032258065], 0.9281997919: [0.3225806451613, 0.6774193548387], 0.7232049948: [0.7741935483871, 0.2258064516129], 0.470603538: [0.6935483870968, 0.3064516129032], 0.0031217482: [0.6451612903226, 0.3548387096774], 0.1508844953: [0.4516129032258, 0.5483870967742], 0.5039021852: [0.5645161290323, 0.4354838709677], 0.9513527575: [0.0161290322581, 0.9838709677419], 0.8857960458: [0.4032258064516, 0.5967741935484], 0.9812695109: [0.7741935483871, 0.2258064516129], 0.174817898: [0.2258064516129, 0.7741935483871], 0.3176378772: [0.1451612903226, 0.8548387096774], 0.6722164412: [0.1935483870968, 0.8064516129032], 0.6828824142: [0.2741935483871, 0.7258064516129], 0.4120707596: [0.3225806451613, 0.6774193548387], 0.6402185224: [0.1451612903226, 0.8548387096774], 0.5538501561: [0.2741935483871, 
0.7258064516129], 0.9846514048: [0.8225806451613, 0.1774193548387], 0.1196670135: [0.9032258064516, 0.0967741935484], 0.2393340271: [0.7741935483871, 0.2258064516129], 0.4466701353: [0.1451612903226, 0.8548387096774], 0.0926118626: [0.0645161290323, 0.9354838709677], 0.8577003122: [0.6935483870968, 0.3064516129032], 0.3384495317: [0.0161290322581, 0.9838709677419], 0.6048387097: [0.5], 0.2333506764: [0.6290322580645, 0.3709677419355], 0.2937044745: [0.8870967741935, 0.1129032258065], 0.8522372529: [0.7741935483871, 0.2258064516129], 0.1011966701: [0.7903225806452, 0.2096774193548], 0.6701352758: [0.3225806451613, 0.6774193548387], 0.1438605619: [0.4032258064516, 0.5967741935484], 0.7775754422: [0.8870967741935, 0.1129032258065], 0.4547346514: [0.6451612903226, 0.3548387096774], 0.4227367326: [0.8870967741935, 0.1129032258065], 0.7515608741: [0.0806451612903, 0.9193548387097], 0.209157128: [0.1290322580645, 0.8709677419355], 0.5463059313: [0.258064516129, 0.741935483871], 0.9209157128: [0.4193548387097, 0.5806451612903], 0.8483350676: [0.0806451612903, 0.9193548387097], 0.7866805411: [0.7096774193548, 0.2903225806452], 0.2083766909: [0.4032258064516, 0.5967741935484], 0.2757544225: [0.4193548387097, 0.5806451612903], 0.8046305931: [0.6612903225806, 0.3387096774194], 0.5962539022: [0.1290322580645, 0.8709677419355], 0.0377211238: [0.2741935483871, 0.7258064516129], 0.3017689906: [0.3870967741935, 0.6129032258065], 0.5715400624: [0.5322580645161, 0.4677419354839], 0.5109261186: [0.1935483870968, 0.8064516129032], 0.29578564: [0.2741935483871, 0.7258064516129], 0.5275754422: [0.3870967741935, 0.6129032258065], 0.946930281: [0.3870967741935, 0.6129032258065], 0.0221123829: [0.758064516129, 0.241935483871], 0.6482830385: [0.6451612903226, 0.3548387096774], 0.1449011446: [0.0161290322581, 0.9838709677419], 0.7336108221: [0.5161290322581, 0.4838709677419], 0.5796045786: [0.0322580645161, 0.9677419354839], 0.424817898: [0.2741935483871, 0.7258064516129], 0.3257023933: 
[0.6451612903226, 0.3548387096774], 0.6992715921: [0.4516129032258, 0.5483870967742], 0.9836108221: [0.0161290322581, 0.9838709677419], 0.6087408949: [0.0645161290323, 0.9354838709677], 0.9302809573: [0.1935483870968, 0.8064516129032], 0.2479188345: [0.758064516129, 0.241935483871], 0.0863683663: [0.4516129032258, 0.5483870967742], 0.4651404787: [0.7741935483871, 0.2258064516129], 0.2718522373: [0.0483870967742, 0.9516129032258], 0.7297086368: [0.5645161290323, 0.4354838709677], 0.8119146722: [0.2741935483871, 0.7258064516129], 0.5317377732: [0.1290322580645, 0.8709677419355], 0.1209677419: [0.5], 0.3766909469: [0.4516129032258, 0.5483870967742], 0.8054110302: [0.0322580645161, 0.9677419354839], 0.3798126951: [0.3225806451613, 0.6774193548387], 0.7096774194: [0.0], 0.7609261186: [0.6935483870968, 0.3064516129032], 0.1844432882: [0.5322580645161, 0.4677419354839], 0.737773153: [0.0645161290323, 0.9354838709677], 0.6409989594: [0.0645161290323, 0.9354838709677], 0.6118626431: [0.0322580645161, 0.9677419354839], 0.2539021852: [0.0645161290323, 0.9354838709677], 0.2070759625: [0.7741935483871, 0.2258064516129], 0.5619146722: [0.7741935483871, 0.2258064516129], 0.7856399584: [0.3870967741935, 0.6129032258065], 0.2489594173: [0.5322580645161, 0.4677419354839], 0.6818418314: [0.7903225806452, 0.2096774193548], 0.9128511967: [0.0806451612903, 0.9193548387097], 0.9334027055: [0.258064516129, 0.741935483871], 0.9927159209: [0.3225806451613, 0.6774193548387], 0.7338709677: [0.5], 0.6129032258: [0.0], 0.1594693028: [0.6612903225806, 0.3387096774194], 0.668314256: [0.5322580645161, 0.4677419354839], 0.2861602497: [0.0645161290323, 0.9354838709677], 0.6899063476: [0.7096774193548, 0.2903225806452], 0.7255463059: [0.0161290322581, 0.9838709677419], 0.5723204995: [0.5161290322581, 0.4838709677419], 0.2239854318: [0.6612903225806, 0.3387096774194], 0.5296566077: [0.7741935483871, 0.2258064516129], 0.7369927159: [0.1451612903226, 0.8548387096774], 0.1207075963: [0.5161290322581, 
0.4838709677419], 0.9479708637: [0.7096774193548, 0.2903225806452], 0.2049947971: [0.3870967741935, 0.6129032258065], 0.0512486993: [0.6935483870968, 0.3064516129032], 0.9739854318: [0.1612903225806, 0.8387096774194], 0.5983350676: [0.4193548387097, 0.5806451612903], 0.0200312175: [0.5645161290323, 0.4354838709677], 0.8982830385: [0.1451612903226, 0.8548387096774], 0.4040062435: [0.8225806451613, 0.1774193548387], 0.9573361082: [0.4516129032258, 0.5483870967742], 0.7658688866: [0.5161290322581, 0.4838709677419], 0.523673257: [0.6290322580645, 0.3709677419355], 0.0593132154: [0.1935483870968, 0.8064516129032], 0.3548387097: [0.0], 0.0353798127: [0.6451612903226, 0.3548387096774], 0.7648283039: [0.9032258064516, 0.0967741935484], 0.3121748179: [0.4516129032258, 0.5483870967742], 0.9583766909: [0.9032258064516, 0.0967741935484], 0.5785639958: [0.258064516129, 0.741935483871], 0.5861082206: [0.2741935483871, 0.7258064516129], 0.2770551509: [0.6935483870968, 0.3064516129032], 0.0418834547: [0.0806451612903, 0.9193548387097], 0.1030176899: [0.1612903225806, 0.8387096774194], 0.3465140479: [0.5161290322581, 0.4838709677419], 0.4060874089: [0.6935483870968, 0.3064516129032], 0.6808012487: [0.8870967741935, 0.1129032258065], 0.6183662851: [0.2741935483871, 0.7258064516129], 0.7554630593: [0.7741935483871, 0.2258064516129], 0.6285119667: [0.1290322580645, 0.8709677419355], 0.8098335068: [0.8870967741935, 0.1129032258065], 0.9989594173: [0.0322580645161, 0.9677419354839], 0.4008844953: [0.0483870967742, 0.9516129032258], 0.1425598335: [0.2258064516129, 0.7741935483871], 0.2531217482: [0.1451612903226, 0.8548387096774], 0.8709677419: [0.0], 0.6870447451: [0.0806451612903, 0.9193548387097], 0.7346514048: [0.3225806451613, 0.6774193548387], 0.8303850156: [0.5161290322581, 0.4838709677419], 0.1365764828: [0.6290322580645, 0.3709677419355], 0.1750780437: [0.0483870967742, 0.9516129032258], 0.7877211238: [0.7741935483871, 0.2258064516129], 0.7983870968: [0.5], 0.505723205: 
[0.4516129032258, 0.5483870967742], 0.8449531738: [0.1612903225806, 0.8387096774194], 0.5411030177: [0.3225806451613, 0.6774193548387], 0.1123829344: [0.1290322580645, 0.8709677419355], 0.593132154: [0.7096774193548, 0.2903225806452], 0.2611862643: [0.6451612903226, 0.3548387096774], 0.2395941727: [0.0483870967742, 0.9516129032258], 0.3381893861: [0.1290322580645, 0.8709677419355], 0.5920915713: [0.3870967741935, 0.6129032258065], 0.3322060354: [0.0806451612903, 0.9193548387097], 0.0551508845: [0.9032258064516, 0.0967741935484], 0.9170135276: [0.0483870967742, 0.9516129032258], 0.3748699272: [0.5645161290323, 0.4354838709677], 0.116805411: [0.5645161290323, 0.4354838709677], 0.4672216441: [0.1290322580645, 0.8709677419355], 0.4352237253: [0.0161290322581, 0.9838709677419], 0.9250780437: [0.4516129032258, 0.5483870967742], 0.6430801249: [0.258064516129, 0.741935483871], 0.8824141519: [0.3870967741935, 0.6129032258065], 0.1761186264: [0.4032258064516, 0.5967741935484], 0.6724765869: [0.1451612903226, 0.8548387096774], 0.1571279917: [0.0645161290323, 0.9354838709677], 0.220603538: [0.1935483870968, 0.8064516129032], 0.6951092612: [0.4193548387097, 0.5806451612903], 0.3402705515: [0.4193548387097, 0.5806451612903], 0.1043184183: [0.6290322580645, 0.3709677419355], 0.4172736733: [0.258064516129, 0.741935483871], 0.6370967742: [0.5], 0.7450572321: [0.6451612903226, 0.3548387096774], 0.2216441207: [0.0645161290323, 0.9354838709677], 0.0554110302: [0.5322580645161, 0.4677419354839], 0.5944328824: [0.0483870967742, 0.9516129032258], 0.7567637877: [0.4032258064516, 0.5967741935484], 0.0634755463: [0.0322580645161, 0.9677419354839], 0.6566077003: [0.3870967741935, 0.6129032258065], 0.755723205: [0.0483870967742, 0.9516129032258], 0.2903225806: [0.0], 0.5806451613: [0.0], 0.9773673257: [0.0806451612903, 0.9193548387097], 0.9596774194: [0.5], 0.8556191467: [0.8225806451613, 0.1774193548387], 0.3475546306: [0.3225806451613, 0.6774193548387], 0.6006763788: [0.5645161290323, 
0.4354838709677], 0.1966701353: [0.6451612903226, 0.3548387096774], 0.9490114464: [0.7741935483871, 0.2258064516129], 0.8064516129: [0.0], 0.3103537981: [0.5645161290323, 0.4354838709677], 0.4963579605: [0.2903225806452, 0.7096774193548], 0.7159209157: [0.1612903225806, 0.8387096774194], 0.6693548387: [0.5], 0.1521852237: [0.5322580645161, 0.4677419354839], 0.7931841831: [0.6935483870968, 0.3064516129032], 0.500780437: [0.8225806451613, 0.1774193548387], 0.8743496358: [0.8870967741935, 0.1129032258065], 0.7879812695: [0.0483870967742, 0.9516129032258], 0.1269510926: [0.258064516129, 0.741935483871], 0.8241415193: [0.4193548387097, 0.5806451612903], 0.7193028096: [0.0806451612903, 0.9193548387097], 0.1272112383: [0.6612903225806, 0.3387096774194], 0.1126430801: [0.0161290322581, 0.9838709677419], 0.8511966701: [0.7096774193548, 0.2903225806452], 0.4495317378: [0.258064516129, 0.741935483871], 0.6191467222: [0.1612903225806, 0.8387096774194], 0.4664412071: [0.4032258064516, 0.5967741935484], 0.2882414152: [0.258064516129, 0.741935483871], 0.1917273673: [0.6612903225806, 0.3387096774194], 0.9503121748: [0.4032258064516, 0.5967741935484], 0.2302289282: [0.7903225806452, 0.2096774193548], 0.4516129032: [0.0], 0.9510926119: [0.1290322580645, 0.8709677419355], 0.1727367326: [0.3870967741935, 0.6129032258065], 0.9627991675: [0.1451612903226, 0.8548387096774], 0.901144641: [0.258064516129, 0.741935483871], 0.3225806452: [0.0], 0.6350156087: [0.758064516129, 0.241935483871], 0.3394901145: [0.8225806451613, 0.1774193548387], 0.9950572321: [0.1451612903226, 0.8548387096774], 0.8951612903: [0.5], 0.1186264308: [0.4516129032258, 0.5483870967742], 0.1667533819: [0.2741935483871, 0.7258064516129], 0.4425078044: [0.5322580645161, 0.4677419354839], 0.9908949011: [0.5322580645161, 0.4677419354839], 0.7245057232: [0.4032258064516, 0.5967741935484], 0.4685223725: [0.8225806451613, 0.1774193548387], 0.6849635796: [0.6290322580645, 0.3709677419355], 0.8441727367: [0.2741935483871, 
0.7258064516129], 0.2312695109: [0.2741935483871, 0.7258064516129], 0.0803850156: [0.0161290322581, 0.9838709677419], 0.9711238293: [0.8870967741935, 0.1129032258065], 0.0678980229: [0.8870967741935, 0.1129032258065], 0.1737773153: [0.2903225806452, 0.7096774193548], 0.9898543184: [0.758064516129, 0.241935483871], 0.6243496358: [0.3870967741935, 0.6129032258065], 0.7078563996: [0.6612903225806, 0.3387096774194], 0.3415712799: [0.6935483870968, 0.3064516129032], 0.4505723205: [0.0322580645161, 0.9677419354839], 0.7991675338: [0.3225806451613, 0.6774193548387], 0.4255983351: [0.1612903225806, 0.8387096774194], 0.919094693: [0.0161290322581, 0.9838709677419], 0.2715920916: [0.7741935483871, 0.2258064516129], 0.9659209157: [0.6612903225806, 0.3387096774194], 0.9981789802: [0.6612903225806, 0.3387096774194], 0.8772112383: [0.1612903225806, 0.8387096774194], 0.2656087409: [0.6290322580645, 0.3709677419355], 0.9677419355: [0.0], 0.6774193548: [0.0], 0.854578564: [0.0161290322581, 0.9838709677419], 0.1001560874: [0.8870967741935, 0.1129032258065], 0.5192507804: [0.6451612903226, 0.3548387096774], 0.0054630593: [0.2741935483871, 0.7258064516129], 0.1428199792: [0.0483870967742, 0.9516129032258], 0.394640999: [0.6290322580645, 0.3709677419355], 0.5632154006: [0.4032258064516, 0.5967741935484], 0.4141519251: [0.1935483870968, 0.8064516129032], 0.5205515088: [0.7903225806452, 0.2096774193548], 0.7773152966: [0.6451612903226, 0.3548387096774], 0.4976586889: [0.0483870967742, 0.9516129032258], 0.5975546306: [0.8225806451613, 0.1774193548387], 0.0564516129: [0.5], 0.273673257: [0.1290322580645, 0.8709677419355], 0.2458376691: [0.5645161290323, 0.4354838709677], 0.1352757544: [0.1612903225806, 0.8387096774194], 0.5673777315: [0.6935483870968, 0.3064516129032], 0.2676899063: [0.0806451612903, 0.9193548387097], 0.1883454735: [0.1935483870968, 0.8064516129032], 0.836628512: [0.258064516129, 0.741935483871], 0.0915712799: [0.1935483870968, 0.8064516129032], 0.4027055151: 
[0.1290322580645, 0.8709677419355], 0.1199271592: [0.5322580645161, 0.4677419354839], 0.7005723205: [0.5322580645161, 0.4677419354839], 0.8293444329: [0.9032258064516, 0.0967741935484], 0.3967221644: [0.0806451612903, 0.9193548387097], 0.02289282: [0.9032258064516, 0.0967741935484], 0.8387096774: [0.0], 0.4997398543: [0.0161290322581, 0.9838709677419], 0.4006243496: [0.7741935483871, 0.2258064516129], 0.5621748179: [0.0483870967742, 0.9516129032258], 0.7692507804: [0.1451612903226, 0.8548387096774], 0.5195109261: [0.8870967741935, 0.1129032258065], 0.3527575442: [0.258064516129, 0.741935483871], 0.7265868887: [0.8225806451613, 0.1774193548387], 0.8160770031: [0.0806451612903, 0.9193548387097], 0.1893860562: [0.0645161290323, 0.9354838709677], 0.3467741935: [0.5], 0.801508845: [0.1451612903226, 0.8548387096774], 0.4349635796: [0.1290322580645, 0.8709677419355], 0.6930280957: [0.1290322580645, 0.8709677419355], 0.3704474506: [0.1290322580645, 0.8709677419355], 0.4817898023: [0.258064516129, 0.741935483871], 0.4497918835: [0.6612903225806, 0.3387096774194], 0.1834027055: [0.758064516129, 0.241935483871], 0.9386056191: [0.6451612903226, 0.3548387096774], 0.3028095734: [0.2903225806452, 0.7096774193548], 0.8254422477: [0.6935483870968, 0.3064516129032], 0.3288241415: [0.1612903225806, 0.8387096774194], 0.0822060354: [0.4193548387097, 0.5806451612903], 0.8959417274: [0.3225806451613, 0.6774193548387], 0.5712799168: [0.9032258064516, 0.0967741935484], 0.0887096774: [0.5], 0.4318418314: [0.7096774193548, 0.2903225806452], 0.1321540062: [0.6451612903226, 0.3548387096774], 0.5392819979: [0.5322580645161, 0.4677419354839], 0.0572320499: [0.3225806451613, 0.6774193548387], 0.1144640999: [0.4193548387097, 0.5806451612903], 0.907648283: [0.7903225806452, 0.2096774193548], 0.0135275754: [0.2258064516129, 0.7741935483871], 0.6079604579: [0.1451612903226, 0.8548387096774], 0.6610301769: [0.0161290322581, 0.9838709677419], 0.7086368366: [0.0322580645161, 0.9677419354839], 
0.27289282: [0.4032258064516, 0.5967741935484], 0.4953173777: [0.3870967741935, 0.6129032258065], 0.1415192508: [0.2903225806452, 0.7096774193548], 0.5338189386: [0.4193548387097, 0.5806451612903], 0.1334547347: [0.7903225806452, 0.2096774193548], 0.7661290323: [0.5], 0.401925078: [0.4032258064516, 0.5967741935484], 0.4745057232: [0.9032258064516, 0.0967741935484], 0.4099895942: [0.9032258064516, 0.0967741935484], 0.4994797086: [0.1290322580645, 0.8709677419355], 0.1979708637: [0.7903225806452, 0.2096774193548], 0.3673257024: [0.2903225806452, 0.7096774193548], 0.7453173777: [0.8870967741935, 0.1129032258065], 0.7575442248: [0.1290322580645, 0.8709677419355], 0.1404786681: [0.3870967741935, 0.6129032258065], 0.3579604579: [0.6451612903226, 0.3548387096774], 0.0894901145: [0.3225806451613, 0.6774193548387], 0.8095733611: [0.6451612903226, 0.3548387096774], 0.6599895942: [0.4032258064516, 0.5967741935484], 0.2749739854: [0.8225806451613, 0.1774193548387], 0.7596253902: [0.4193548387097, 0.5806451612903], 0.843132154: [0.7903225806452, 0.2096774193548], 0.8199791883: [0.7741935483871, 0.2258064516129], 0.1344953174: [0.2741935483871, 0.7258064516129], 0.243496358: [0.4193548387097, 0.5806451612903], 0.570239334: [0.4516129032258, 0.5483870967742], 0.8420915713: [0.8870967741935, 0.1129032258065], 0.9146722164: [0.3870967741935, 0.6129032258065], 0.3270031217: [0.7903225806452, 0.2096774193548], 0.0239334027: [0.5161290322581, 0.4838709677419], 0.1990114464: [0.2741935483871, 0.7258064516129], 0.2570239334: [0.0322580645161, 0.9677419354839], 0.6680541103: [0.9032258064516, 0.0967741935484], 0.3340270552: [0.3870967741935, 0.6129032258065], 0.0176899063: [0.4193548387097, 0.5806451612903], 0.547346514: [0.0322580645161, 0.9677419354839], 0.558012487: [0.0806451612903, 0.9193548387097], 0.3280437045: [0.2741935483871, 0.7258064516129], 0.1092611863: [0.2903225806452, 0.7096774193548], 0.0273152966: [0.1451612903226, 0.8548387096774], 0.7401144641: [0.6612903225806, 
0.3387096774194], 0.1157648283: [0.6935483870968, 0.3064516129032], 0.8139958377: [0.6290322580645, 0.3709677419355], 0.9086888658: [0.2741935483871, 0.7258064516129], 0.308012487: [0.4193548387097, 0.5806451612903], 0.4570759625: [0.2741935483871, 0.7258064516129], 0.9732049948: [0.2741935483871, 0.7258064516129], 0.8233610822: [0.8225806451613, 0.1774193548387], 0.5840270552: [0.8870967741935, 0.1129032258065], 0.9021852237: [0.0322580645161, 0.9677419354839], 0.9667013528: [0.0322580645161, 0.9677419354839], 0.7741935484: [0.0], 0.9625390219: [0.1935483870968, 0.8064516129032], 0.3361082206: [0.7741935483871, 0.2258064516129], 0.3041103018: [0.0483870967742, 0.9516129032258], 0.7970863684: [0.9032258064516, 0.0967741935484], 0.3301248699: [0.6290322580645, 0.3709677419355], 0.1490634755: [0.5645161290323, 0.4354838709677], 0.482830385: [0.0322580645161, 0.9677419354839], 0.1467221644: [0.4193548387097, 0.5806451612903], 0.4331425598: [0.0483870967742, 0.9516129032258], 0.6162851197: [0.8870967741935, 0.1129032258065], 0.8535379813: [0.4032258064516, 0.5967741935484], 0.8930801249: [0.758064516129, 0.241935483871], 0.0470863684: [0.4032258064516, 0.5967741935484], 0.6357960458: [0.9032258064516, 0.0967741935484], 0.5059833507: [0.758064516129, 0.241935483871], 0.459157128: [0.6290322580645, 0.3709677419355], 0.2135796046: [0.5645161290323, 0.4354838709677], 0.2073361082: [0.0483870967742, 0.9516129032258], 0.6378772112: [0.3225806451613, 0.6774193548387], 0.9063475546: [0.6451612903226, 0.3548387096774], 0.0780437045: [0.2258064516129, 0.7741935483871], 0.2801768991: [0.758064516129, 0.241935483871], 0.8220603538: [0.1290322580645, 0.8709677419355], 0.6368366285: [0.5161290322581, 0.4838709677419], 0.3061914672: [0.0161290322581, 0.9838709677419], 0.4151925078: [0.0645161290323, 0.9354838709677], 0.5483870968: [0.0], 0.7473985432: [0.2741935483871, 0.7258064516129], 0.9430280957: [0.6290322580645, 0.3709677419355], 0.9336628512: [0.6612903225806, 
0.3387096774194], 0.7047346514: [0.1451612903226, 0.8548387096774], 0.4092091571: [0.758064516129, 0.241935483871], 0.5684183143: [0.5645161290323, 0.4354838709677], 0.7544224766: [0.7096774193548, 0.2903225806452], 0.2830385016: [0.3225806451613, 0.6774193548387], 0.778616025: [0.7903225806452, 0.2096774193548], 0.3454734651: [0.9032258064516, 0.0967741935484], 0.0720603538: [0.6290322580645, 0.3709677419355], 0.2562434964: [0.6612903225806, 0.3387096774194], 0.3707075963: [0.0161290322581, 0.9838709677419], 0.9833506764: [0.1290322580645, 0.8709677419355], 0.9167533819: [0.7741935483871, 0.2258064516129], 0.2822580645: [0.5], 0.3592611863: [0.7903225806452, 0.2096774193548], 0.0793444329: [0.4032258064516, 0.5967741935484], 0.987773153: [0.5645161290323, 0.4354838709677], 0.4370447451: [0.4193548387097, 0.5806451612903], 0.3852757544: [0.6612903225806, 0.3387096774194], 0.4882934443: [0.7903225806452, 0.2096774193548], 0.8524973985: [0.0483870967742, 0.9516129032258], 0.2643080125: [0.1612903225806, 0.8387096774194], 0.6287721124: [0.0161290322581, 0.9838709677419], 0.8223204995: [0.0161290322581, 0.9838709677419], 0.6514047867: [0.1612903225806, 0.8387096774194], 0.0918314256: [0.1451612903226, 0.8548387096774], 0.0624349636: [0.258064516129, 0.741935483871], 0.9958376691: [0.0645161290323, 0.9354838709677], 0.3860561915: [0.0322580645161, 0.9677419354839], 0.4464099896: [0.1935483870968, 0.8064516129032], 0.7619667014: [0.5645161290323, 0.4354838709677], 0.0491675338: [0.8225806451613, 0.1774193548387], 0.1646722164: [0.8870967741935, 0.1129032258065], 0.6586888658: [0.7741935483871, 0.2258064516129], 0.5725806452: [0.5], 0.4737252862: [0.758064516129, 0.241935483871], 0.1240894901: [0.1451612903226, 0.8548387096774], 0.7533818939: [0.3870967741935, 0.6129032258065], 0.7130593132: [0.8870967741935, 0.1129032258065], 0.2291883455: [0.8870967741935, 0.1129032258065], 0.5150884495: [0.0322580645161, 0.9677419354839], 0.8296045786: [0.5322580645161, 
0.4677419354839], 0.3777315297: [0.9032258064516, 0.0967741935484], 0.5161290323: [0.0], 0.6547866805: [0.0806451612903, 0.9193548387097], 0.7234651405: [0.0483870967742, 0.9516129032258], 0.7640478668: [0.758064516129, 0.241935483871], 0.4654006244: [0.0483870967742, 0.9516129032258], 0.5660770031: [0.4193548387097, 0.5806451612903], 0.1116024974: [0.4032258064516, 0.5967741935484], 0.5788241415: [0.6612903225806, 0.3387096774194], 0.4144120708: [0.1451612903226, 0.8548387096774], 0.866805411: [0.0645161290323, 0.9354838709677], 0.8844953174: [0.7741935483871, 0.2258064516129], 0.2060353798: [0.2903225806452, 0.7096774193548], 0.1657127992: [0.7903225806452, 0.2096774193548], 0.7494797086: [0.6290322580645, 0.3709677419355], 0.2934443288: [0.6451612903226, 0.3548387096774], 0.4973985432: [0.7741935483871, 0.2258064516129], 0.0231529657: [0.5322580645161, 0.4677419354839], 0.7650884495: [0.5322580645161, 0.4677419354839], 0.616024974: [0.6451612903226, 0.3548387096774], 0.9180541103: [0.4032258064516, 0.5967741935484], 0.6108220604: [0.258064516129, 0.741935483871], 0.5361602497: [0.5645161290323, 0.4354838709677], 0.3134755463: [0.5322580645161, 0.4677419354839], 0.4224765869: [0.6451612903226, 0.3548387096774], 0.3904786681: [0.8870967741935, 0.1129032258065], 0.1248699272: [0.0645161290323, 0.9354838709677], 0.6670135276: [0.4516129032258, 0.5483870967742], 0.7689906348: [0.1935483870968, 0.8064516129032], 0.9188345473: [0.1290322580645, 0.8709677419355], 0.2695109261: [0.3870967741935, 0.6129032258065], 0.5070239334: [0.5322580645161, 0.4677419354839], 0.0148283039: [0.4032258064516, 0.5967741935484], 0.2635275754: [0.2741935483871, 0.7258064516129], 0.186264308: [0.3225806451613, 0.6774193548387], 0.7481789802: [0.1612903225806, 0.8387096774194], 0.528616025: [0.7096774193548, 0.2903225806452], 0.7055150885: [0.0645161290323, 0.9354838709677], 0.599635796: [0.6935483870968, 0.3064516129032], 0.3985431842: [0.3870967741935, 0.6129032258065], 0.6628511967: 
[0.4193548387097, 0.5806451612903], 0.1188865765: [0.758064516129, 0.241935483871], 0.8878772112: [0.8225806451613, 0.1774193548387], 0.3925598335: [0.2741935483871, 0.7258064516129], 0.1802809573: [0.6935483870968, 0.3064516129032], 0.2780957336: [0.5645161290323, 0.4354838709677], 0.7796566077: [0.2741935483871, 0.7258064516129], 0.8868366285: [0.0161290322581, 0.9838709677419], 0.5517689906: [0.8870967741935, 0.1129032258065], 0.3870967742: [0.0], 0.9271592092: [0.5161290322581, 0.4838709677419], 0.9815296566: [0.0483870967742, 0.9516129032258], 0.8805931322: [0.0806451612903, 0.9193548387097], 0.5111862643: [0.1451612903226, 0.8548387096774], 0.2447970864: [0.6935483870968, 0.3064516129032], 0.6995317378: [0.758064516129, 0.241935483871], 0.9895941727: [0.4516129032258, 0.5483870967742], 0.7172216441: [0.6290322580645, 0.3709677419355], 0.3426118626: [0.5645161290323, 0.4354838709677], 0.8108740895: [0.7903225806452, 0.2096774193548], 0.9201352758: [0.8225806451613, 0.1774193548387], 0.5764828304: [0.0645161290323, 0.9354838709677], 0.6932882414: [0.0161290322581, 0.9838709677419], 0.3686264308: [0.0483870967742, 0.9516129032258], 0.6732570239: [0.0645161290323, 0.9354838709677], 0.0946930281: [0.258064516129, 0.741935483871], 0.7578043704: [0.0161290322581, 0.9838709677419], 0.1813215401: [0.5645161290323, 0.4354838709677], 0.3173777315: [0.1935483870968, 0.8064516129032], 0.9313215401: [0.0645161290323, 0.9354838709677], 0.0366805411: [0.7903225806452, 0.2096774193548], 0.7942247659: [0.5645161290323, 0.4354838709677], 0.9544745057: [0.6935483870968, 0.3064516129032], 0.0876690947: [0.5322580645161, 0.4677419354839], 0.5015608741: [0.4193548387097, 0.5806451612903], 0.9802289282: [0.7096774193548, 0.2903225806452], 0.7588449532: [0.8225806451613, 0.1774193548387], 0.3446930281: [0.758064516129, 0.241935483871], 0.1563475546: [0.1451612903226, 0.8548387096774], 0.8949011446: [0.5161290322581, 0.4838709677419], 0.9261186264: [0.9032258064516, 0.0967741935484], 
0.1532258065: [0.5], 0.8314255983: [0.3225806451613, 0.6774193548387], 0.285119667: [0.1935483870968, 0.8064516129032], 0.6909469303: [0.7741935483871, 0.2258064516129], 0.9232570239: [0.5645161290323, 0.4354838709677], 0.2208636837: [0.1451612903226, 0.8548387096774], 0.7367325702: [0.1935483870968, 0.8064516129032], 0.6589490114: [0.0483870967742, 0.9516129032258], 0.418314256: [0.0322580645161, 0.9677419354839], 0.2947450572: [0.7903225806452, 0.2096774193548], 0.6495837669: [0.7903225806452, 0.2096774193548], 0.3207596254: [0.6612903225806, 0.3387096774194], 0.8462539022: [0.6290322580645, 0.3709677419355], 0.4071279917: [0.5645161290323, 0.4354838709677], 0.8990634755: [0.0645161290323, 0.9354838709677], 0.4237773153: [0.7903225806452, 0.2096774193548], 0.0626951093: [0.6612903225806, 0.3387096774194], 0.353798127: [0.0322580645161, 0.9677419354839], 0.866024974: [0.1451612903226, 0.8548387096774], 0.9594172737: [0.5161290322581, 0.4838709677419], 0.5215920916: [0.2741935483871, 0.7258064516129], 0.797346514: [0.5322580645161, 0.4677419354839], 0.6329344433: [0.5645161290323, 0.4354838709677], 0.4308012487: [0.3870967741935, 0.6129032258065], 0.6056191467: [0.3225806451613, 0.6774193548387], 0.0949531738: [0.6612903225806, 0.3387096774194], 0.1324141519: [0.8870967741935, 0.1129032258065], 0.3738293444: [0.6935483870968, 0.3064516129032], 0.2414151925: [0.1290322580645, 0.8709677419355], 0.0301768991: [0.258064516129, 0.741935483871], 0.5088449532: [0.3225806451613, 0.6774193548387], 0.2112382934: [0.4193548387097, 0.5806451612903], 0.611082206: [0.6612903225806, 0.3387096774194], 0.0189906348: [0.6935483870968, 0.3064516129032], 0.196930281: [0.8870967741935, 0.1129032258065], 0.2528616025: [0.1935483870968, 0.8064516129032], 0.5598335068: [0.3870967741935, 0.6129032258065], 0.9916753382: [0.5161290322581, 0.4838709677419], 0.7013527575: [0.5161290322581, 0.4838709677419], 0.5309573361: [0.4032258064516, 0.5967741935484], 0.5257544225: [0.0806451612903, 
0.9193548387097], 0.0312174818: [0.0322580645161, 0.9677419354839], 0.9906347555: [0.9032258064516, 0.0967741935484], 0.8608220604: [0.758064516129, 0.241935483871], 0.3818938606: [0.1935483870968, 0.8064516129032], 0.3498959417: [0.1451612903226, 0.8548387096774], 0.4758064516: [0.5], 0.622528616: [0.0806451612903, 0.9193548387097], 0.3821540062: [0.1451612903226, 0.8548387096774], 0.8980228928: [0.1935483870968, 0.8064516129032], 0.7398543184: [0.258064516129, 0.741935483871], 0.4789281998: [0.1451612903226, 0.8548387096774], 0.8938605619: [0.9032258064516, 0.0967741935484], 0.1529656608: [0.5161290322581, 0.4838709677419], 0.657648283: [0.2903225806452, 0.7096774193548], 0.6318938606: [0.6935483870968, 0.3064516129032], 0.1644120708: [0.6451612903226, 0.3548387096774], 0.8543184183: [0.1290322580645, 0.8709677419355], 0.0447450572: [0.2903225806452, 0.7096774193548], 0.1789802289: [0.4193548387097, 0.5806451612903], 0.325962539: [0.8870967741935, 0.1129032258065], 0.2174817898: [0.5161290322581, 0.4838709677419], 0.0543704475: [0.758064516129, 0.241935483871], 0.5868886576: [0.1612903225806, 0.8387096774194], 0.4289802289: [0.0806451612903, 0.9193548387097], 0.6836628512: [0.1612903225806, 0.8387096774194], 0.0304370447: [0.6612903225806, 0.3387096774194], 0.4549947971: [0.8870967741935, 0.1129032258065], 0.5639958377: [0.1290322580645, 0.8709677419355], 0.8847554631: [0.0483870967742, 0.9516129032258], 0.8376690947: [0.0322580645161, 0.9677419354839], 0.0770031217: [0.2903225806452, 0.7096774193548], 0.1540062435: [0.3225806451613, 0.6774193548387], 0.0385015609: [0.1612903225806, 0.8387096774194], 0.3829344433: [0.0645161290323, 0.9354838709677], 0.9451092612: [0.0806451612903, 0.9193548387097], 0.4797086368: [0.0645161290323, 0.9354838709677], 0.6360561915: [0.5322580645161, 0.4677419354839], 0.4110301769: [0.5161290322581, 0.4838709677419], 0.540062435: [0.5161290322581, 0.4838709677419], 0.2185223725: [0.3225806451613, 0.6774193548387], 0.1480228928: 
[0.6935483870968, 0.3064516129032], 0.8178980229: [0.3870967741935, 0.6129032258065], 0.5390218522: [0.9032258064516, 0.0967741935484], 0.2580645161: [0.0], 0.1290322581: [0.0], 0.5941727367: [0.7741935483871, 0.2258064516129], 0.8501560874: [0.3870967741935, 0.6129032258065], 0.2125390219: [0.6935483870968, 0.3064516129032], 0.151925078: [0.9032258064516, 0.0967741935484], 0.9919354839: [0.5], 0.0967741935: [0.0], 0.1935483871: [0.0], 0.0241935484: [0.5], 0.5757023933: [0.1451612903226, 0.8548387096774], 0.5330385016: [0.8225806451613, 0.1774193548387], 0.5442247659: [0.0645161290323, 0.9354838709677], 0.4901144641: [0.1612903225806, 0.8387096774194], 0.5223725286: [0.1612903225806, 0.8387096774194], 0.0541103018: [0.4516129032258, 0.5483870967742], 0.0845473465: [0.5645161290323, 0.4354838709677], 0.2289281998: [0.6451612903226, 0.3548387096774], 0.930541103: [0.1451612903226, 0.8548387096774], 0.8688865765: [0.258064516129, 0.741935483871], 0.7325702393: [0.9032258064516, 0.0967741935484], 0.802289282: [0.0645161290323, 0.9354838709677], 0.8699271592: [0.0322580645161, 0.9677419354839], 0.7700312175: [0.0645161290323, 0.9354838709677], 0.5403225806: [0.5], 0.6943288241: [0.8225806451613, 0.1774193548387], 0.372528616: [0.4193548387097, 0.5806451612903], 0.790062435: [0.0161290322581, 0.9838709677419], 0.9523933403: [0.8225806451613, 0.1774193548387], 0.7463579605: [0.7903225806452, 0.2096774193548], 0.9107700312: [0.6290322580645, 0.3709677419355], 0.9708636837: [0.6451612903226, 0.3548387096774], 0.6277315297: [0.4032258064516, 0.5967741935484], 0.7023933403: [0.3225806451613, 0.6774193548387], 0.0137877211: [0.0483870967742, 0.9516129032258], 0.5850676379: [0.7903225806452, 0.2096774193548], 0.7804370447: [0.1612903225806, 0.8387096774194], 0.1886056191: [0.1451612903226, 0.8548387096774], 0.4869927159: [0.6451612903226, 0.3548387096774], 0.0783038502: [0.0483870967742, 0.9516129032258], 0.8335067638: [0.1935483870968, 0.8064516129032], 0.6964099896: 
[0.6935483870968, 0.3064516129032], 0.9604578564: [0.3225806451613, 0.6774193548387], 0.3582206035: [0.8870967741935, 0.1129032258065], 0.5143080125: [0.6612903225806, 0.3387096774194], 0.110301769: [0.7741935483871, 0.2258064516129], 0.4422476587: [0.9032258064516, 0.0967741935484], 0.1105619147: [0.0483870967742, 0.9516129032258], 0.441207076: [0.4516129032258, 0.5483870967742], 0.436264308: [0.8225806451613, 0.1774193548387], 0.935483871: [0.0], 0.6451612903: [0.0], 0.8605619147: [0.4516129032258, 0.5483870967742], 0.2892819979: [0.0322580645161, 0.9677419354839], 0.8629032258: [0.5], 0.8657648283: [0.1935483870968, 0.8064516129032], 0.5642559834: [0.0161290322581, 0.9838709677419], 0.3152965661: [0.3225806451613, 0.6774193548387], 0.0270551509: [0.1935483870968, 0.8064516129032], 0.5078043704: [0.5161290322581, 0.4838709677419], 0.2705515088: [0.2903225806452, 0.7096774193548], 0.6506243496: [0.2741935483871, 0.7258064516129], 0.052289282: [0.5645161290323, 0.4354838709677], 0.1386576483: [0.0806451612903, 0.9193548387097], 0.087408949: [0.9032258064516, 0.0967741935484], 0.9344432882: [0.0322580645161, 0.9677419354839], 0.4443288241: [0.3225806451613, 0.6774193548387], 0.6204474506: [0.6290322580645, 0.3709677419355], 0.6266909469: [0.0483870967742, 0.9516129032258], 0.2031737773: [0.0806451612903, 0.9193548387097], 0.8126951093: [0.1612903225806, 0.8387096774194], 0.535119667: [0.6935483870968, 0.3064516129032], 0.7981269511: [0.5161290322581, 0.4838709677419], 0.7669094693: [0.3225806451613, 0.6774193548387], 0.1841831426: [0.9032258064516, 0.0967741935484], 0.8212799168: [0.4032258064516, 0.5967741935484], 0.0280957336: [0.0645161290323, 0.9354838709677], 0.6027575442: [0.758064516129, 0.241935483871], 0.2853798127: [0.1451612903226, 0.8548387096774], 0.5067637877: [0.9032258064516, 0.0967741935484], 0.603798127: [0.5322580645161, 0.4677419354839], 0.2486992716: [0.9032258064516, 0.0967741935484], 0.5299167534: [0.0483870967742, 0.9516129032258], 
0.8587408949: [0.5645161290323, 0.4354838709677], 0.4914151925: [0.6290322580645, 0.3709677419355], 0.4612382934: [0.0806451612903, 0.9193548387097], 0.9492715921: [0.0483870967742, 0.9516129032258], 0.2427159209: [0.8225806451613, 0.1774193548387], 0.3444328824: [0.4516129032258, 0.5483870967742], 0.6298126951: [0.8225806451613, 0.1774193548387], 0.6607700312: [0.1290322580645, 0.8709677419355], 0.26144641: [0.8870967741935, 0.1129032258065], 0.1852237253: [0.5161290322581, 0.4838709677419], 0.7838189386: [0.0806451612903, 0.9193548387097], 0.8740894901: [0.6451612903226, 0.3548387096774], 0.9825702393: [0.4032258064516, 0.5967741935484], 0.9417273673: [0.1612903225806, 0.8387096774194], 0.7731529657: [0.0322580645161, 0.9677419354839], 0.3644640999: [0.0806451612903, 0.9193548387097], 0.4734651405: [0.4516129032258, 0.5483870967742], 0.0124869927: [0.2903225806452, 0.7096774193548], 0.2497398543: [0.5161290322581, 0.4838709677419], 0.4674817898: [0.0161290322581, 0.9838709677419], 0.0699791883: [0.2741935483871, 0.7258064516129], 0.7723725286: [0.6612903225806, 0.3387096774194], 0.6024973985: [0.4516129032258, 0.5483870967742], 0.3995837669: [0.2903225806452, 0.7096774193548], 0.493496358: [0.0806451612903, 0.9193548387097], 0.3204994797: [0.258064516129, 0.741935483871], 0.1602497399: [0.0322580645161, 0.9677419354839], 0.086628512: [0.758064516129, 0.241935483871], 0.314516129: [0.5], 0.4765868887: [0.3225806451613, 0.6774193548387], 0.8753902185: [0.7903225806452, 0.2096774193548], 0.0561914672: [0.5161290322581, 0.4838709677419], 0.2247658689: [0.0322580645161, 0.9677419354839], 0.9586368366: [0.5322580645161, 0.4677419354839], 0.3142559834: [0.5161290322581, 0.4838709677419], 0.604578564: [0.5161290322581, 0.4838709677419], 0.0033818939: [0.8870967741935, 0.1129032258065], 0.4435483871: [0.5], 0.6763787721: [0.0322580645161, 0.9677419354839], 0.5431841831: [0.1935483870968, 0.8064516129032], 0.6922476587: [0.4032258064516, 0.5967741935484], 0.296566077: 
[0.1612903225806, 0.8387096774194], 0.0741415193: [0.0806451612903, 0.9193548387097], 0.6753381894: [0.258064516129, 0.741935483871], 0.8636836629: [0.3225806451613, 0.6774193548387], 0.1612903226: [0.0], 0.1997918835: [0.1612903225806, 0.8387096774194], 0.0499479709: [0.4193548387097, 0.5806451612903], 0.6651925078: [0.5645161290323, 0.4354838709677], 0.1063995838: [0.0806451612903, 0.9193548387097], 0.5965140479: [0.0161290322581, 0.9838709677419], 0.1914672216: [0.258064516129, 0.741935483871], 0.4578563996: [0.1612903225806, 0.8387096774194], 0.820239334: [0.0483870967742, 0.9516129032258], 0.2258064516: [0.0], 0.1459417274: [0.8225806451613, 0.1774193548387], 0.680541103: [0.6451612903226, 0.3548387096774], 0.7252861603: [0.1290322580645, 0.8709677419355], 0.691207076: [0.0483870967742, 0.9516129032258], 0.6485431842: [0.8870967741935, 0.1129032258065], 0.7890218522: [0.4032258064516, 0.5967741935484], 0.0062434964: [0.1612903225806, 0.8387096774194], 0.0218522373: [0.4516129032258, 0.5483870967742], 0.8345473465: [0.0645161290323, 0.9354838709677], 0.5652965661: [0.8225806451613, 0.1774193548387], 0.7918834547: [0.4193548387097, 0.5806451612903], 0.8283038502: [0.4516129032258, 0.5483870967742], 0.5379812695: [0.4516129032258, 0.5483870967742], 0.8928199792: [0.4516129032258, 0.5483870967742], 0.3696670135: [0.4032258064516, 0.5967741935484], 0.9032258065: [0.0], 0.0169094693: [0.8225806451613, 0.1774193548387], 0.5733610822: [0.3225806451613, 0.6774193548387], 0.7273673257: [0.4193548387097, 0.5806451612903], 0.4089490114: [0.4516129032258, 0.5483870967742], 0.3184183143: [0.0645161290323, 0.9354838709677], 0.144640999: [0.1290322580645, 0.8709677419355], 0.3769510926: [0.758064516129, 0.241935483871], 0.4986992716: [0.4032258064516, 0.5967741935484], 0.7016129032: [0.5], 0.6399583767: [0.1935483870968, 0.8064516129032], 0.8899583767: [0.6935483870968, 0.3064516129032], 0.9854318418: [0.4193548387097, 0.5806451612903], 0.9409469303: [0.2741935483871, 
0.7258064516129], 0.4432882414: [0.5161290322581, 0.4838709677419], 0.7317898023: [0.758064516129, 0.241935483871], 0.3215400624: [0.0322580645161, 0.9677419354839], 0.3457336108: [0.5322580645161, 0.4677419354839], 0.1136836629: [0.8225806451613, 0.1774193548387], 0.151144641: [0.758064516129, 0.241935483871], 0.8865764828: [0.1290322580645, 0.8709677419355], 0.9752861603: [0.6290322580645, 0.3709677419355], 0.2809573361: [0.9032258064516, 0.0967741935484], 0.371748179: [0.8225806451613, 0.1774193548387], 0.9979188345: [0.258064516129, 0.741935483871], 0.01144641: [0.3870967741935, 0.6129032258065], 0.9399063476: [0.7903225806452, 0.2096774193548], 0.7315296566: [0.4516129032258, 0.5483870967742], 0.4747658689: [0.5322580645161, 0.4677419354839], 0.6305931322: [0.4193548387097, 0.5806451612903], 0.250780437: [0.3225806451613, 0.6774193548387], 0.9531737773: [0.4193548387097, 0.5806451612903], 0.5559313215: [0.6290322580645, 0.3709677419355], 0.0884495317: [0.5161290322581, 0.4838709677419], 0.1768990635: [0.1290322580645, 0.8709677419355], 0.6755983351: [0.6612903225806, 0.3387096774194], 0.2154006244: [0.4516129032258, 0.5483870967742], 0.7419354839: [0.0], 0.9388657648: [0.8870967741935, 0.1129032258065], 0.0437044745: [0.3870967741935, 0.6129032258065], 0.6035379813: [0.9032258064516, 0.0967741935484], 0.9066077003: [0.8870967741935, 0.1129032258065], 0.6672736733: [0.758064516129, 0.241935483871], 0.0603537981: [0.0645161290323, 0.9354838709677], 0.1709157128: [0.0806451612903, 0.9193548387097], 0.337408949: [0.4032258064516, 0.5967741935484], 0.2094172737: [0.0161290322581, 0.9838709677419], 0.0075442248: [0.6290322580645, 0.3709677419355], 0.3038501561: [0.7741935483871, 0.2258064516129], 0.7637877211: [0.4516129032258, 0.5483870967742], 0.4716441207: [0.5645161290323, 0.4354838709677], 0.2354318418: [0.0806451612903, 0.9193548387097], 0.9263787721: [0.5322580645161, 0.4677419354839], 0.2978668054: [0.6290322580645, 0.3709677419355], 0.9555150885: 
[0.5645161290323, 0.4354838709677], 0.7075962539: [0.258064516129, 0.741935483871], 0.3093132154: [0.6935483870968, 0.3064516129032], 0.1082206035: [0.3870967741935, 0.6129032258065], 0.2164412071: [0.9032258064516, 0.0967741935484], 0.5546305931: [0.1612903225806, 0.8387096774194], 0.1279916753: [0.0322580645161, 0.9677419354839], 0.4268990635: [0.6290322580645, 0.3709677419355], 0.7898022893: [0.1290322580645, 0.8709677419355], 0.0158688866: [0.0161290322581, 0.9838709677419], 0.0707596254: [0.1612903225806, 0.8387096774194], 0.2104578564: [0.8225806451613, 0.1774193548387], 0.2799167534: [0.4516129032258, 0.5483870967742], 0.6264308012: [0.7741935483871, 0.2258064516129], 0.21566077: [0.758064516129, 0.241935483871], 0.2372528616: [0.3870967741935, 0.6129032258065], 0.3059313215: [0.1290322580645, 0.8709677419355], 0.2739334027: [0.0161290322581, 0.9838709677419], 0.0957336108: [0.0322580645161, 0.9677419354839], 0.8306451613: [0.5], 0.3779916753: [0.5322580645161, 0.4677419354839], 0.2999479709: [0.0806451612903, 0.9193548387097], 0.1022372529: [0.2741935483871, 0.7258064516129], 0.121748179: [0.3225806451613, 0.6774193548387], 0.3787721124: [0.5161290322581, 0.4838709677419], 0.9253381894: [0.758064516129, 0.241935483871], 0.4029656608: [0.0161290322581, 0.9838709677419], 0.185483871: [0.5], 0.4755463059: [0.5161290322581, 0.4838709677419], 0.8886576483: [0.4193548387097, 0.5806451612903], 0.8941207076: [0.5322580645161, 0.4677419354839], 0.6173257024: [0.7903225806452, 0.2096774193548], 0.8691467222: [0.6612903225806, 0.3387096774194], 0.2559833507: [0.258064516129, 0.741935483871], 0.5319979188: [0.0161290322581, 0.9838709677419], 0.2819979188: [0.5161290322581, 0.4838709677419], 0.4786680541: [0.1935483870968, 0.8064516129032]}
averages_odd={0.0: [0.0], 0.25: [0.5], 0.0096253902: [0.0806451612903, 0.9193548387097], 0.1688345473: [0.6290322580645, 0.3709677419355], 0.3850156087: [0.258064516129, 0.741935483871], 0.0481269511: [0.0161290322581, 0.9838709677419], 0.3530176899: [0.6612903225806, 0.3387096774194], 0.6433402706: [0.6612903225806, 0.3387096774194], 0.6077003122: [0.1935483870968, 0.8064516129032], 0.3790322581: [0.5], 0.4560353798: [0.7903225806452, 0.2096774193548], 0.8626430801: [0.5161290322581, 0.4838709677419], 0.732830385: [0.5322580645161, 0.4677419354839], 0.3933402706: [0.1612903225806, 0.8387096774194], 0.4328824142: [0.7741935483871, 0.2258064516129], 0.0044224766: [0.7903225806452, 0.2096774193548], 0.482049948: [0.6612903225806, 0.3387096774194], 0.8764308012: [0.2741935483871, 0.7258064516129], 0.7817377732: [0.6290322580645, 0.3709677419355], 0.9274193548: [0.5], 0.3506763788: [0.0645161290323, 0.9354838709677], 0.5434443288: [0.1451612903226, 0.8548387096774], 0.0322580645: [0.0], 0.1675338189: [0.1612903225806, 0.8387096774194], 0.4383454735: [0.6935483870968, 0.3064516129032], 0.0478668054: [0.1290322580645, 0.8709677419355], 0.361082206: [0.1612903225806, 0.8387096774194], 0.7221644121: [0.7096774193548, 0.2903225806452], 0.1925078044: [0.0322580645161, 0.9677419354839], 0.4640998959: [0.7096774193548, 0.2903225806452], 0.232049948: [0.1612903225806, 0.8387096774194], 0.4838709677: [0.0], 0.9635796046: [0.0645161290323, 0.9354838709677], 0.5837669095: [0.6451612903226, 0.3548387096774], 0.0356399584: [0.8870967741935, 0.1129032258065], 0.4193548387: [0.0], 0.96566077: [0.258064516129, 0.741935483871], 0.7960457856: [0.4516129032258, 0.5483870967742], 0.5902705515: [0.0806451612903, 0.9193548387097], 0.5140478668: [0.258064516129, 0.741935483871], 0.6347554631: [0.4516129032258, 0.5483870967742], 0.3051508845: [0.4032258064516, 0.5967741935484], 0.7963059313: [0.758064516129, 0.241935483871], 0.6253902185: [0.7096774193548, 0.2903225806452], 0.0835067638: 
[0.6935483870968, 0.3064516129032], 0.6974505723: [0.5645161290323, 0.4354838709677], 0.4341831426: [0.4032258064516, 0.5967741935484], 0.8043704475: [0.258064516129, 0.741935483871], 0.2010926119: [0.6290322580645, 0.3709677419355], 0.5080645161: [0.5], 0.9721644121: [0.7903225806452, 0.2096774193548], 0.8563995838: [0.4193548387097, 0.5806451612903], 0.0814255983: [0.8225806451613, 0.1774193548387], 0.8285639958: [0.758064516129, 0.241935483871], 0.6620707596: [0.8225806451613, 0.1774193548387], 0.3132154006: [0.9032258064516, 0.0967741935484], 0.2812174818: [0.5322580645161, 0.4677419354839], 0.3902185224: [0.6451612903226, 0.3548387096774], 0.7911030177: [0.8225806451613, 0.1774193548387], 0.7408949011: [0.0322580645161, 0.9677419354839], 0.3072320499: [0.8225806451613, 0.1774193548387], 0.8368886576: [0.6612903225806, 0.3387096774194], 0.5704994797: [0.758064516129, 0.241935483871], 0.4102497399: [0.5322580645161, 0.4677419354839], 0.4872528616: [0.8870967741935, 0.1129032258065], 0.349635796: [0.1935483870968, 0.8064516129032], 0.8012486993: [0.1935483870968, 0.8064516129032], 0.2406347555: [0.4032258064516, 0.5967741935484], 0.9791883455: [0.3870967741935, 0.6129032258065], 0.546566077: [0.6612903225806, 0.3387096774194], 0.1831425598: [0.4516129032258, 0.5483870967742], 0.3662851197: [0.3870967741935, 0.6129032258065], 0.04578564: [0.2258064516129, 0.7741935483871], 0.7140998959: [0.7903225806452, 0.2096774193548], 0.9575962539: [0.758064516129, 0.241935483871], 0.360301769: [0.2741935483871, 0.7258064516129], 0.4693028096: [0.4193548387097, 0.5806451612903], 0.8418314256: [0.6451612903226, 0.3548387096774], 0.075962539: [0.3870967741935, 0.6129032258065], 0.1238293444: [0.1935483870968, 0.8064516129032], 0.2476586889: [0.4516129032258, 0.5483870967742], 0.1771592092: [0.0161290322581, 0.9838709677419], 0.5754422477: [0.1935483870968, 0.8064516129032], 0.7286680541: [0.6935483870968, 0.3064516129032], 0.4893340271: [0.2741935483871, 0.7258064516129], 
0.8618626431: [0.5322580645161, 0.4677419354839], 0.4112903226: [0.5], 0.2416753382: [0.0161290322581, 0.9838709677419], 0.6527055151: [0.6290322580645, 0.3709677419355], 0.4630593132: [0.3870967741935, 0.6129032258065], 0.5119667014: [0.0645161290323, 0.9354838709677], 0.8834547347: [0.7096774193548, 0.2903225806452], 0.0460457856: [0.0483870967742, 0.9516129032258], 0.3683662851: [0.7741935483871, 0.2258064516129], 0.3363683663: [0.0483870967742, 0.9516129032258], 0.9867325702: [0.6935483870968, 0.3064516129032], 0.064516129: [0.0], 0.3623829344: [0.6290322580645, 0.3709677419355], 0.4393860562: [0.5645161290323, 0.4354838709677], 0.8616024974: [0.9032258064516, 0.0967741935484], 0.7044745057: [0.1935483870968, 0.8064516129032], 0.8264828304: [0.5645161290323, 0.4354838709677], 0.1781997919: [0.8225806451613, 0.1774193548387], 0.6441207076: [0.0322580645161, 0.9677419354839], 0.7127991675: [0.6451612903226, 0.3548387096774], 0.3350676379: [0.2903225806452, 0.7096774193548], 0.2167013528: [0.5322580645161, 0.4677419354839], 0.9094693028: [0.1612903225806, 0.8387096774194], 0.1592091571: [0.258064516129, 0.741935483871], 0.0398022893: [0.6290322580645, 0.3709677419355], 0.5954734651: [0.4032258064516, 0.5967741935484], 0.5528095734: [0.7903225806452, 0.2096774193548], 0.3124349636: [0.758064516129, 0.241935483871], 0.5608740895: [0.7096774193548, 0.2903225806452], 0.9947970864: [0.1935483870968, 0.8064516129032], 0.6641519251: [0.6935483870968, 0.3064516129032], 0.4474505723: [0.0645161290323, 0.9354838709677], 0.2237252862: [0.258064516129, 0.741935483871], 0.7003121748: [0.9032258064516, 0.0967741935484], 0.9222164412: [0.6935483870968, 0.3064516129032], 0.4047866805: [0.4193548387097, 0.5806451612903], 0.4414672216: [0.758064516129, 0.241935483871], 0.8189386056: [0.7096774193548, 0.2903225806452], 0.9157127992: [0.7096774193548, 0.2903225806452], 0.2177419355: [0.5], 0.2624869927: [0.7903225806452, 0.2096774193548], 0.5382414152: [0.758064516129, 
0.241935483871], 0.0801248699: [0.1290322580645, 0.8709677419355], 0.2885015609: [0.6612903225806, 0.3387096774194], 0.0156087409: [0.1290322580645, 0.8709677419355], 0.9014047867: [0.6612903225806, 0.3387096774194], 0.0689386056: [0.7903225806452, 0.2096774193548], 0.3915192508: [0.7903225806452, 0.2096774193548], 0.551508845: [0.6451612903226, 0.3548387096774], 0.669094693: [0.5161290322581, 0.4838709677419], 0.8785119667: [0.6290322580645, 0.3709677419355], 0.7211238293: [0.3870967741935, 0.6129032258065], 0.4175338189: [0.6612903225806, 0.3387096774194], 0.1560874089: [0.1935483870968, 0.8064516129032], 0.5881893861: [0.6290322580645, 0.3709677419355], 0.0676378772: [0.6451612903226, 0.3548387096774], 0.7721123829: [0.258064516129, 0.741935483871], 0.7151404787: [0.2741935483871, 0.7258064516129], 0.8337669095: [0.1451612903226, 0.8548387096774], 0.0249739854: [0.3225806451613, 0.6774193548387], 0.5028616025: [0.6935483870968, 0.3064516129032], 0.0998959417: [0.6451612903226, 0.3548387096774], 0.8909989594: [0.5645161290323, 0.4354838709677], 0.2382934443: [0.2903225806452, 0.7096774193548], 0.0595733611: [0.1451612903226, 0.8548387096774], 0.6888657648: [0.3870967741935, 0.6129032258065], 0.9281997919: [0.3225806451613, 0.6774193548387], 0.7232049948: [0.7741935483871, 0.2258064516129], 0.470603538: [0.6935483870968, 0.3064516129032], 0.0031217482: [0.6451612903226, 0.3548387096774], 0.1508844953: [0.4516129032258, 0.5483870967742], 0.5039021852: [0.5645161290323, 0.4354838709677], 0.9513527575: [0.0161290322581, 0.9838709677419], 0.8857960458: [0.4032258064516, 0.5967741935484], 0.9812695109: [0.7741935483871, 0.2258064516129], 0.174817898: [0.2258064516129, 0.7741935483871], 0.3176378772: [0.1451612903226, 0.8548387096774], 0.6722164412: [0.1935483870968, 0.8064516129032], 0.6828824142: [0.2741935483871, 0.7258064516129], 0.4120707596: [0.3225806451613, 0.6774193548387], 0.6402185224: [0.1451612903226, 0.8548387096774], 0.5538501561: [0.2741935483871, 
0.7258064516129], 0.9846514048: [0.8225806451613, 0.1774193548387], 0.1196670135: [0.9032258064516, 0.0967741935484], 0.2393340271: [0.7741935483871, 0.2258064516129], 0.4466701353: [0.1451612903226, 0.8548387096774], 0.0926118626: [0.0645161290323, 0.9354838709677], 0.8577003122: [0.6935483870968, 0.3064516129032], 0.3384495317: [0.0161290322581, 0.9838709677419], 0.6048387097: [0.5], 0.2333506764: [0.6290322580645, 0.3709677419355], 0.2937044745: [0.8870967741935, 0.1129032258065], 0.8522372529: [0.7741935483871, 0.2258064516129], 0.1011966701: [0.7903225806452, 0.2096774193548], 0.6701352758: [0.3225806451613, 0.6774193548387], 0.1438605619: [0.4032258064516, 0.5967741935484], 0.7775754422: [0.8870967741935, 0.1129032258065], 0.4547346514: [0.6451612903226, 0.3548387096774], 0.4227367326: [0.8870967741935, 0.1129032258065], 0.7515608741: [0.0806451612903, 0.9193548387097], 0.209157128: [0.1290322580645, 0.8709677419355], 0.5463059313: [0.258064516129, 0.741935483871], 0.9209157128: [0.4193548387097, 0.5806451612903], 0.8483350676: [0.0806451612903, 0.9193548387097], 0.7866805411: [0.7096774193548, 0.2903225806452], 0.2083766909: [0.4032258064516, 0.5967741935484], 0.2757544225: [0.4193548387097, 0.5806451612903], 0.8046305931: [0.6612903225806, 0.3387096774194], 0.5962539022: [0.1290322580645, 0.8709677419355], 0.0377211238: [0.2741935483871, 0.7258064516129], 0.3017689906: [0.3870967741935, 0.6129032258065], 0.5715400624: [0.5322580645161, 0.4677419354839], 0.5109261186: [0.1935483870968, 0.8064516129032], 0.29578564: [0.2741935483871, 0.7258064516129], 0.5275754422: [0.3870967741935, 0.6129032258065], 0.946930281: [0.3870967741935, 0.6129032258065], 0.0221123829: [0.758064516129, 0.241935483871], 0.6482830385: [0.6451612903226, 0.3548387096774], 0.1449011446: [0.0161290322581, 0.9838709677419], 0.7336108221: [0.5161290322581, 0.4838709677419], 0.5796045786: [0.0322580645161, 0.9677419354839], 0.424817898: [0.2741935483871, 0.7258064516129], 0.3257023933: 
[0.6451612903226, 0.3548387096774], 0.6992715921: [0.4516129032258, 0.5483870967742], 0.9836108221: [0.0161290322581, 0.9838709677419], 0.6087408949: [0.0645161290323, 0.9354838709677], 0.9302809573: [0.1935483870968, 0.8064516129032], 0.2479188345: [0.758064516129, 0.241935483871], 0.0863683663: [0.4516129032258, 0.5483870967742], 0.4651404787: [0.7741935483871, 0.2258064516129], 0.2718522373: [0.0483870967742, 0.9516129032258], 0.7297086368: [0.5645161290323, 0.4354838709677], 0.8119146722: [0.2741935483871, 0.7258064516129], 0.5317377732: [0.1290322580645, 0.8709677419355], 0.1209677419: [0.5], 0.3766909469: [0.4516129032258, 0.5483870967742], 0.8054110302: [0.0322580645161, 0.9677419354839], 0.3798126951: [0.3225806451613, 0.6774193548387], 0.7096774194: [0.0], 0.7609261186: [0.6935483870968, 0.3064516129032], 0.1844432882: [0.5322580645161, 0.4677419354839], 0.737773153: [0.0645161290323, 0.9354838709677], 0.6409989594: [0.0645161290323, 0.9354838709677], 0.6118626431: [0.0322580645161, 0.9677419354839], 0.2539021852: [0.0645161290323, 0.9354838709677], 0.2070759625: [0.7741935483871, 0.2258064516129], 0.5619146722: [0.7741935483871, 0.2258064516129], 0.7856399584: [0.3870967741935, 0.6129032258065], 0.2489594173: [0.5322580645161, 0.4677419354839], 0.6818418314: [0.7903225806452, 0.2096774193548], 0.9128511967: [0.0806451612903, 0.9193548387097], 0.9334027055: [0.258064516129, 0.741935483871], 0.9927159209: [0.3225806451613, 0.6774193548387], 0.7338709677: [0.5], 0.6129032258: [0.0], 0.1594693028: [0.6612903225806, 0.3387096774194], 0.668314256: [0.5322580645161, 0.4677419354839], 0.2861602497: [0.0645161290323, 0.9354838709677], 0.6899063476: [0.7096774193548, 0.2903225806452], 0.7255463059: [0.0161290322581, 0.9838709677419], 0.5723204995: [0.5161290322581, 0.4838709677419], 0.2239854318: [0.6612903225806, 0.3387096774194], 0.5296566077: [0.7741935483871, 0.2258064516129], 0.7369927159: [0.1451612903226, 0.8548387096774], 0.1207075963: [0.5161290322581, 
0.4838709677419], 0.9479708637: [0.7096774193548, 0.2903225806452], 0.2049947971: [0.3870967741935, 0.6129032258065], 0.0512486993: [0.6935483870968, 0.3064516129032], 0.9739854318: [0.1612903225806, 0.8387096774194], 0.5983350676: [0.4193548387097, 0.5806451612903], 0.0200312175: [0.5645161290323, 0.4354838709677], 0.8982830385: [0.1451612903226, 0.8548387096774], 0.4040062435: [0.8225806451613, 0.1774193548387], 0.9573361082: [0.4516129032258, 0.5483870967742], 0.7658688866: [0.5161290322581, 0.4838709677419], 0.523673257: [0.6290322580645, 0.3709677419355], 0.0593132154: [0.1935483870968, 0.8064516129032], 0.3548387097: [0.0], 0.0353798127: [0.6451612903226, 0.3548387096774], 0.7648283039: [0.9032258064516, 0.0967741935484], 0.3121748179: [0.4516129032258, 0.5483870967742], 0.9583766909: [0.9032258064516, 0.0967741935484], 0.5785639958: [0.258064516129, 0.741935483871], 0.5861082206: [0.2741935483871, 0.7258064516129], 0.2770551509: [0.6935483870968, 0.3064516129032], 0.0418834547: [0.0806451612903, 0.9193548387097], 0.1030176899: [0.1612903225806, 0.8387096774194], 0.3465140479: [0.5161290322581, 0.4838709677419], 0.4060874089: [0.6935483870968, 0.3064516129032], 0.6808012487: [0.8870967741935, 0.1129032258065], 0.6183662851: [0.2741935483871, 0.7258064516129], 0.7554630593: [0.7741935483871, 0.2258064516129], 0.6285119667: [0.1290322580645, 0.8709677419355], 0.8098335068: [0.8870967741935, 0.1129032258065], 0.9989594173: [0.0322580645161, 0.9677419354839], 0.4008844953: [0.0483870967742, 0.9516129032258], 0.1425598335: [0.2258064516129, 0.7741935483871], 0.2531217482: [0.1451612903226, 0.8548387096774], 0.8709677419: [0.0], 0.6870447451: [0.0806451612903, 0.9193548387097], 0.7346514048: [0.3225806451613, 0.6774193548387], 0.8303850156: [0.5161290322581, 0.4838709677419], 0.1365764828: [0.6290322580645, 0.3709677419355], 0.1750780437: [0.0483870967742, 0.9516129032258], 0.7877211238: [0.7741935483871, 0.2258064516129], 0.7983870968: [0.5], 0.505723205: 
[0.4516129032258, 0.5483870967742], 0.8449531738: [0.1612903225806, 0.8387096774194], 0.5411030177: [0.3225806451613, 0.6774193548387], 0.1123829344: [0.1290322580645, 0.8709677419355], 0.593132154: [0.7096774193548, 0.2903225806452], 0.2611862643: [0.6451612903226, 0.3548387096774], 0.2395941727: [0.0483870967742, 0.9516129032258], 0.3381893861: [0.1290322580645, 0.8709677419355], 0.5920915713: [0.3870967741935, 0.6129032258065], 0.3322060354: [0.0806451612903, 0.9193548387097], 0.0551508845: [0.9032258064516, 0.0967741935484], 0.9170135276: [0.0483870967742, 0.9516129032258], 0.3748699272: [0.5645161290323, 0.4354838709677], 0.116805411: [0.5645161290323, 0.4354838709677], 0.4672216441: [0.1290322580645, 0.8709677419355], 0.4352237253: [0.0161290322581, 0.9838709677419], 0.9250780437: [0.4516129032258, 0.5483870967742], 0.6430801249: [0.258064516129, 0.741935483871], 0.8824141519: [0.3870967741935, 0.6129032258065], 0.1761186264: [0.4032258064516, 0.5967741935484], 0.6724765869: [0.1451612903226, 0.8548387096774], 0.1571279917: [0.0645161290323, 0.9354838709677], 0.220603538: [0.1935483870968, 0.8064516129032], 0.6951092612: [0.4193548387097, 0.5806451612903], 0.3402705515: [0.4193548387097, 0.5806451612903], 0.1043184183: [0.6290322580645, 0.3709677419355], 0.4172736733: [0.258064516129, 0.741935483871], 0.6370967742: [0.5], 0.7450572321: [0.6451612903226, 0.3548387096774], 0.2216441207: [0.0645161290323, 0.9354838709677], 0.0554110302: [0.5322580645161, 0.4677419354839], 0.5944328824: [0.0483870967742, 0.9516129032258], 0.7567637877: [0.4032258064516, 0.5967741935484], 0.0634755463: [0.0322580645161, 0.9677419354839], 0.6566077003: [0.3870967741935, 0.6129032258065], 0.755723205: [0.0483870967742, 0.9516129032258], 0.2903225806: [0.0], 0.5806451613: [0.0], 0.9773673257: [0.0806451612903, 0.9193548387097], 0.9596774194: [0.5], 0.8556191467: [0.8225806451613, 0.1774193548387], 0.3475546306: [0.3225806451613, 0.6774193548387], 0.6006763788: [0.5645161290323, 
0.4354838709677], 0.1966701353: [0.6451612903226, 0.3548387096774], 0.9490114464: [0.7741935483871, 0.2258064516129], 0.8064516129: [0.0], 0.3103537981: [0.5645161290323, 0.4354838709677], 0.4963579605: [0.2903225806452, 0.7096774193548], 0.7159209157: [0.1612903225806, 0.8387096774194], 0.6693548387: [0.5], 0.1521852237: [0.5322580645161, 0.4677419354839], 0.7931841831: [0.6935483870968, 0.3064516129032], 0.500780437: [0.8225806451613, 0.1774193548387], 0.8743496358: [0.8870967741935, 0.1129032258065], 0.7879812695: [0.0483870967742, 0.9516129032258], 0.1269510926: [0.258064516129, 0.741935483871], 0.8241415193: [0.4193548387097, 0.5806451612903], 0.7193028096: [0.0806451612903, 0.9193548387097], 0.1272112383: [0.6612903225806, 0.3387096774194], 0.1126430801: [0.0161290322581, 0.9838709677419], 0.8511966701: [0.7096774193548, 0.2903225806452], 0.4495317378: [0.258064516129, 0.741935483871], 0.6191467222: [0.1612903225806, 0.8387096774194], 0.4664412071: [0.4032258064516, 0.5967741935484], 0.2882414152: [0.258064516129, 0.741935483871], 0.1917273673: [0.6612903225806, 0.3387096774194], 0.9503121748: [0.4032258064516, 0.5967741935484], 0.2302289282: [0.7903225806452, 0.2096774193548], 0.4516129032: [0.0], 0.9510926119: [0.1290322580645, 0.8709677419355], 0.1727367326: [0.3870967741935, 0.6129032258065], 0.9627991675: [0.1451612903226, 0.8548387096774], 0.901144641: [0.258064516129, 0.741935483871], 0.3225806452: [0.0], 0.6350156087: [0.758064516129, 0.241935483871], 0.3394901145: [0.8225806451613, 0.1774193548387], 0.9950572321: [0.1451612903226, 0.8548387096774], 0.8951612903: [0.5], 0.1186264308: [0.4516129032258, 0.5483870967742], 0.1667533819: [0.2741935483871, 0.7258064516129], 0.4425078044: [0.5322580645161, 0.4677419354839], 0.9908949011: [0.5322580645161, 0.4677419354839], 0.7245057232: [0.4032258064516, 0.5967741935484], 0.4685223725: [0.8225806451613, 0.1774193548387], 0.6849635796: [0.6290322580645, 0.3709677419355], 0.8441727367: [0.2741935483871, 
0.7258064516129], 0.2312695109: [0.2741935483871, 0.7258064516129], 0.0803850156: [0.0161290322581, 0.9838709677419], 0.9711238293: [0.8870967741935, 0.1129032258065], 0.0678980229: [0.8870967741935, 0.1129032258065], 0.1737773153: [0.2903225806452, 0.7096774193548], 0.9898543184: [0.758064516129, 0.241935483871], 0.6243496358: [0.3870967741935, 0.6129032258065], 0.7078563996: [0.6612903225806, 0.3387096774194], 0.3415712799: [0.6935483870968, 0.3064516129032], 0.4505723205: [0.0322580645161, 0.9677419354839], 0.7991675338: [0.3225806451613, 0.6774193548387], 0.4255983351: [0.1612903225806, 0.8387096774194], 0.919094693: [0.0161290322581, 0.9838709677419], 0.2715920916: [0.7741935483871, 0.2258064516129], 0.9659209157: [0.6612903225806, 0.3387096774194], 0.9981789802: [0.6612903225806, 0.3387096774194], 0.8772112383: [0.1612903225806, 0.8387096774194], 0.2656087409: [0.6290322580645, 0.3709677419355], 0.9677419355: [0.0], 0.6774193548: [0.0], 0.854578564: [0.0161290322581, 0.9838709677419], 0.1001560874: [0.8870967741935, 0.1129032258065], 0.5192507804: [0.6451612903226, 0.3548387096774], 0.0054630593: [0.2741935483871, 0.7258064516129], 0.1428199792: [0.0483870967742, 0.9516129032258], 0.394640999: [0.6290322580645, 0.3709677419355], 0.5632154006: [0.4032258064516, 0.5967741935484], 0.4141519251: [0.1935483870968, 0.8064516129032], 0.5205515088: [0.7903225806452, 0.2096774193548], 0.7773152966: [0.6451612903226, 0.3548387096774], 0.4976586889: [0.0483870967742, 0.9516129032258], 0.5975546306: [0.8225806451613, 0.1774193548387], 0.0564516129: [0.5], 0.273673257: [0.1290322580645, 0.8709677419355], 0.2458376691: [0.5645161290323, 0.4354838709677], 0.1352757544: [0.1612903225806, 0.8387096774194], 0.5673777315: [0.6935483870968, 0.3064516129032], 0.2676899063: [0.0806451612903, 0.9193548387097], 0.1883454735: [0.1935483870968, 0.8064516129032], 0.836628512: [0.258064516129, 0.741935483871], 0.0915712799: [0.1935483870968, 0.8064516129032], 0.4027055151: 
[0.1290322580645, 0.8709677419355], 0.1199271592: [0.5322580645161, 0.4677419354839], 0.7005723205: [0.5322580645161, 0.4677419354839], 0.8293444329: [0.9032258064516, 0.0967741935484], 0.3967221644: [0.0806451612903, 0.9193548387097], 0.02289282: [0.9032258064516, 0.0967741935484], 0.8387096774: [0.0], 0.4997398543: [0.0161290322581, 0.9838709677419], 0.4006243496: [0.7741935483871, 0.2258064516129], 0.5621748179: [0.0483870967742, 0.9516129032258], 0.7692507804: [0.1451612903226, 0.8548387096774], 0.5195109261: [0.8870967741935, 0.1129032258065], 0.3527575442: [0.258064516129, 0.741935483871], 0.7265868887: [0.8225806451613, 0.1774193548387], 0.8160770031: [0.0806451612903, 0.9193548387097], 0.1893860562: [0.0645161290323, 0.9354838709677], 0.3467741935: [0.5], 0.801508845: [0.1451612903226, 0.8548387096774], 0.4349635796: [0.1290322580645, 0.8709677419355], 0.6930280957: [0.1290322580645, 0.8709677419355], 0.3704474506: [0.1290322580645, 0.8709677419355], 0.4817898023: [0.258064516129, 0.741935483871], 0.4497918835: [0.6612903225806, 0.3387096774194], 0.1834027055: [0.758064516129, 0.241935483871], 0.9386056191: [0.6451612903226, 0.3548387096774], 0.3028095734: [0.2903225806452, 0.7096774193548], 0.8254422477: [0.6935483870968, 0.3064516129032], 0.3288241415: [0.1612903225806, 0.8387096774194], 0.0822060354: [0.4193548387097, 0.5806451612903], 0.8959417274: [0.3225806451613, 0.6774193548387], 0.5712799168: [0.9032258064516, 0.0967741935484], 0.0887096774: [0.5], 0.4318418314: [0.7096774193548, 0.2903225806452], 0.1321540062: [0.6451612903226, 0.3548387096774], 0.5392819979: [0.5322580645161, 0.4677419354839], 0.0572320499: [0.3225806451613, 0.6774193548387], 0.1144640999: [0.4193548387097, 0.5806451612903], 0.907648283: [0.7903225806452, 0.2096774193548], 0.0135275754: [0.2258064516129, 0.7741935483871], 0.6079604579: [0.1451612903226, 0.8548387096774], 0.6610301769: [0.0161290322581, 0.9838709677419], 0.7086368366: [0.0322580645161, 0.9677419354839], 
0.27289282: [0.4032258064516, 0.5967741935484], 0.4953173777: [0.3870967741935, 0.6129032258065], 0.1415192508: [0.2903225806452, 0.7096774193548], 0.5338189386: [0.4193548387097, 0.5806451612903], 0.1334547347: [0.7903225806452, 0.2096774193548], 0.7661290323: [0.5], 0.401925078: [0.4032258064516, 0.5967741935484], 0.4745057232: [0.9032258064516, 0.0967741935484], 0.4099895942: [0.9032258064516, 0.0967741935484], 0.4994797086: [0.1290322580645, 0.8709677419355], 0.1979708637: [0.7903225806452, 0.2096774193548], 0.3673257024: [0.2903225806452, 0.7096774193548], 0.7453173777: [0.8870967741935, 0.1129032258065], 0.7575442248: [0.1290322580645, 0.8709677419355], 0.1404786681: [0.3870967741935, 0.6129032258065], 0.3579604579: [0.6451612903226, 0.3548387096774], 0.0894901145: [0.3225806451613, 0.6774193548387], 0.8095733611: [0.6451612903226, 0.3548387096774], 0.6599895942: [0.4032258064516, 0.5967741935484], 0.2749739854: [0.8225806451613, 0.1774193548387], 0.7596253902: [0.4193548387097, 0.5806451612903], 0.843132154: [0.7903225806452, 0.2096774193548], 0.8199791883: [0.7741935483871, 0.2258064516129], 0.1344953174: [0.2741935483871, 0.7258064516129], 0.243496358: [0.4193548387097, 0.5806451612903], 0.570239334: [0.4516129032258, 0.5483870967742], 0.8420915713: [0.8870967741935, 0.1129032258065], 0.9146722164: [0.3870967741935, 0.6129032258065], 0.3270031217: [0.7903225806452, 0.2096774193548], 0.0239334027: [0.5161290322581, 0.4838709677419], 0.1990114464: [0.2741935483871, 0.7258064516129], 0.2570239334: [0.0322580645161, 0.9677419354839], 0.6680541103: [0.9032258064516, 0.0967741935484], 0.3340270552: [0.3870967741935, 0.6129032258065], 0.0176899063: [0.4193548387097, 0.5806451612903], 0.547346514: [0.0322580645161, 0.9677419354839], 0.558012487: [0.0806451612903, 0.9193548387097], 0.3280437045: [0.2741935483871, 0.7258064516129], 0.1092611863: [0.2903225806452, 0.7096774193548], 0.0273152966: [0.1451612903226, 0.8548387096774], 0.7401144641: [0.6612903225806, 
0.3387096774194], 0.1157648283: [0.6935483870968, 0.3064516129032], 0.8139958377: [0.6290322580645, 0.3709677419355], 0.9086888658: [0.2741935483871, 0.7258064516129], 0.308012487: [0.4193548387097, 0.5806451612903], 0.4570759625: [0.2741935483871, 0.7258064516129], 0.9732049948: [0.2741935483871, 0.7258064516129], 0.8233610822: [0.8225806451613, 0.1774193548387], 0.5840270552: [0.8870967741935, 0.1129032258065], 0.9021852237: [0.0322580645161, 0.9677419354839], 0.9667013528: [0.0322580645161, 0.9677419354839], 0.7741935484: [0.0], 0.9625390219: [0.1935483870968, 0.8064516129032], 0.3361082206: [0.7741935483871, 0.2258064516129], 0.3041103018: [0.0483870967742, 0.9516129032258], 0.7970863684: [0.9032258064516, 0.0967741935484], 0.3301248699: [0.6290322580645, 0.3709677419355], 0.1490634755: [0.5645161290323, 0.4354838709677], 0.482830385: [0.0322580645161, 0.9677419354839], 0.1467221644: [0.4193548387097, 0.5806451612903], 0.4331425598: [0.0483870967742, 0.9516129032258], 0.6162851197: [0.8870967741935, 0.1129032258065], 0.8535379813: [0.4032258064516, 0.5967741935484], 0.8930801249: [0.758064516129, 0.241935483871], 0.0470863684: [0.4032258064516, 0.5967741935484], 0.6357960458: [0.9032258064516, 0.0967741935484], 0.5059833507: [0.758064516129, 0.241935483871], 0.459157128: [0.6290322580645, 0.3709677419355], 0.2135796046: [0.5645161290323, 0.4354838709677], 0.2073361082: [0.0483870967742, 0.9516129032258], 0.6378772112: [0.3225806451613, 0.6774193548387], 0.9063475546: [0.6451612903226, 0.3548387096774], 0.0780437045: [0.2258064516129, 0.7741935483871], 0.2801768991: [0.758064516129, 0.241935483871], 0.8220603538: [0.1290322580645, 0.8709677419355], 0.6368366285: [0.5161290322581, 0.4838709677419], 0.3061914672: [0.0161290322581, 0.9838709677419], 0.4151925078: [0.0645161290323, 0.9354838709677], 0.5483870968: [0.0], 0.7473985432: [0.2741935483871, 0.7258064516129], 0.9430280957: [0.6290322580645, 0.3709677419355], 0.9336628512: [0.6612903225806, 
0.3387096774194], 0.7047346514: [0.1451612903226, 0.8548387096774], 0.4092091571: [0.758064516129, 0.241935483871], 0.5684183143: [0.5645161290323, 0.4354838709677], 0.7544224766: [0.7096774193548, 0.2903225806452], 0.2830385016: [0.3225806451613, 0.6774193548387], 0.778616025: [0.7903225806452, 0.2096774193548], 0.3454734651: [0.9032258064516, 0.0967741935484], 0.0720603538: [0.6290322580645, 0.3709677419355], 0.2562434964: [0.6612903225806, 0.3387096774194], 0.3707075963: [0.0161290322581, 0.9838709677419], 0.9833506764: [0.1290322580645, 0.8709677419355], 0.9167533819: [0.7741935483871, 0.2258064516129], 0.2822580645: [0.5], 0.3592611863: [0.7903225806452, 0.2096774193548], 0.0793444329: [0.4032258064516, 0.5967741935484], 0.987773153: [0.5645161290323, 0.4354838709677], 0.4370447451: [0.4193548387097, 0.5806451612903], 0.3852757544: [0.6612903225806, 0.3387096774194], 0.4882934443: [0.7903225806452, 0.2096774193548], 0.8524973985: [0.0483870967742, 0.9516129032258], 0.2643080125: [0.1612903225806, 0.8387096774194], 0.6287721124: [0.0161290322581, 0.9838709677419], 0.8223204995: [0.0161290322581, 0.9838709677419], 0.6514047867: [0.1612903225806, 0.8387096774194], 0.0918314256: [0.1451612903226, 0.8548387096774], 0.0624349636: [0.258064516129, 0.741935483871], 0.9958376691: [0.0645161290323, 0.9354838709677], 0.3860561915: [0.0322580645161, 0.9677419354839], 0.4464099896: [0.1935483870968, 0.8064516129032], 0.7619667014: [0.5645161290323, 0.4354838709677], 0.0491675338: [0.8225806451613, 0.1774193548387], 0.1646722164: [0.8870967741935, 0.1129032258065], 0.6586888658: [0.7741935483871, 0.2258064516129], 0.5725806452: [0.5], 0.4737252862: [0.758064516129, 0.241935483871], 0.1240894901: [0.1451612903226, 0.8548387096774], 0.7533818939: [0.3870967741935, 0.6129032258065], 0.7130593132: [0.8870967741935, 0.1129032258065], 0.2291883455: [0.8870967741935, 0.1129032258065], 0.5150884495: [0.0322580645161, 0.9677419354839], 0.8296045786: [0.5322580645161, 
0.4677419354839], 0.3777315297: [0.9032258064516, 0.0967741935484], 0.5161290323: [0.0], 0.6547866805: [0.0806451612903, 0.9193548387097], 0.7234651405: [0.0483870967742, 0.9516129032258], 0.7640478668: [0.758064516129, 0.241935483871], 0.4654006244: [0.0483870967742, 0.9516129032258], 0.5660770031: [0.4193548387097, 0.5806451612903], 0.1116024974: [0.4032258064516, 0.5967741935484], 0.5788241415: [0.6612903225806, 0.3387096774194], 0.4144120708: [0.1451612903226, 0.8548387096774], 0.866805411: [0.0645161290323, 0.9354838709677], 0.8844953174: [0.7741935483871, 0.2258064516129], 0.2060353798: [0.2903225806452, 0.7096774193548], 0.1657127992: [0.7903225806452, 0.2096774193548], 0.7494797086: [0.6290322580645, 0.3709677419355], 0.2934443288: [0.6451612903226, 0.3548387096774], 0.4973985432: [0.7741935483871, 0.2258064516129], 0.0231529657: [0.5322580645161, 0.4677419354839], 0.7650884495: [0.5322580645161, 0.4677419354839], 0.616024974: [0.6451612903226, 0.3548387096774], 0.9180541103: [0.4032258064516, 0.5967741935484], 0.6108220604: [0.258064516129, 0.741935483871], 0.5361602497: [0.5645161290323, 0.4354838709677], 0.3134755463: [0.5322580645161, 0.4677419354839], 0.4224765869: [0.6451612903226, 0.3548387096774], 0.3904786681: [0.8870967741935, 0.1129032258065], 0.1248699272: [0.0645161290323, 0.9354838709677], 0.6670135276: [0.4516129032258, 0.5483870967742], 0.7689906348: [0.1935483870968, 0.8064516129032], 0.9188345473: [0.1290322580645, 0.8709677419355], 0.2695109261: [0.3870967741935, 0.6129032258065], 0.5070239334: [0.5322580645161, 0.4677419354839], 0.0148283039: [0.4032258064516, 0.5967741935484], 0.2635275754: [0.2741935483871, 0.7258064516129], 0.186264308: [0.3225806451613, 0.6774193548387], 0.7481789802: [0.1612903225806, 0.8387096774194], 0.528616025: [0.7096774193548, 0.2903225806452], 0.7055150885: [0.0645161290323, 0.9354838709677], 0.599635796: [0.6935483870968, 0.3064516129032], 0.3985431842: [0.3870967741935, 0.6129032258065], 0.6628511967: 
[0.4193548387097, 0.5806451612903], 0.1188865765: [0.758064516129, 0.241935483871], 0.8878772112: [0.8225806451613, 0.1774193548387], 0.3925598335: [0.2741935483871, 0.7258064516129], 0.1802809573: [0.6935483870968, 0.3064516129032], 0.2780957336: [0.5645161290323, 0.4354838709677], 0.7796566077: [0.2741935483871, 0.7258064516129], 0.8868366285: [0.0161290322581, 0.9838709677419], 0.5517689906: [0.8870967741935, 0.1129032258065], 0.3870967742: [0.0], 0.9271592092: [0.5161290322581, 0.4838709677419], 0.9815296566: [0.0483870967742, 0.9516129032258], 0.8805931322: [0.0806451612903, 0.9193548387097], 0.5111862643: [0.1451612903226, 0.8548387096774], 0.2447970864: [0.6935483870968, 0.3064516129032], 0.6995317378: [0.758064516129, 0.241935483871], 0.9895941727: [0.4516129032258, 0.5483870967742], 0.7172216441: [0.6290322580645, 0.3709677419355], 0.3426118626: [0.5645161290323, 0.4354838709677], 0.8108740895: [0.7903225806452, 0.2096774193548], 0.9201352758: [0.8225806451613, 0.1774193548387], 0.5764828304: [0.0645161290323, 0.9354838709677], 0.6932882414: [0.0161290322581, 0.9838709677419], 0.3686264308: [0.0483870967742, 0.9516129032258], 0.6732570239: [0.0645161290323, 0.9354838709677], 0.0946930281: [0.258064516129, 0.741935483871], 0.7578043704: [0.0161290322581, 0.9838709677419], 0.1813215401: [0.5645161290323, 0.4354838709677], 0.3173777315: [0.1935483870968, 0.8064516129032], 0.9313215401: [0.0645161290323, 0.9354838709677], 0.0366805411: [0.7903225806452, 0.2096774193548], 0.7942247659: [0.5645161290323, 0.4354838709677], 0.9544745057: [0.6935483870968, 0.3064516129032], 0.0876690947: [0.5322580645161, 0.4677419354839], 0.5015608741: [0.4193548387097, 0.5806451612903], 0.9802289282: [0.7096774193548, 0.2903225806452], 0.7588449532: [0.8225806451613, 0.1774193548387], 0.3446930281: [0.758064516129, 0.241935483871], 0.1563475546: [0.1451612903226, 0.8548387096774], 0.8949011446: [0.5161290322581, 0.4838709677419], 0.9261186264: [0.9032258064516, 0.0967741935484], 
0.1532258065: [0.5], 0.8314255983: [0.3225806451613, 0.6774193548387], 0.285119667: [0.1935483870968, 0.8064516129032], 0.6909469303: [0.7741935483871, 0.2258064516129], 0.9232570239: [0.5645161290323, 0.4354838709677], 0.2208636837: [0.1451612903226, 0.8548387096774], 0.7367325702: [0.1935483870968, 0.8064516129032], 0.6589490114: [0.0483870967742, 0.9516129032258], 0.418314256: [0.0322580645161, 0.9677419354839], 0.2947450572: [0.7903225806452, 0.2096774193548], 0.6495837669: [0.7903225806452, 0.2096774193548], 0.3207596254: [0.6612903225806, 0.3387096774194], 0.8462539022: [0.6290322580645, 0.3709677419355], 0.4071279917: [0.5645161290323, 0.4354838709677], 0.8990634755: [0.0645161290323, 0.9354838709677], 0.4237773153: [0.7903225806452, 0.2096774193548], 0.0626951093: [0.6612903225806, 0.3387096774194], 0.353798127: [0.0322580645161, 0.9677419354839], 0.866024974: [0.1451612903226, 0.8548387096774], 0.9594172737: [0.5161290322581, 0.4838709677419], 0.5215920916: [0.2741935483871, 0.7258064516129], 0.797346514: [0.5322580645161, 0.4677419354839], 0.6329344433: [0.5645161290323, 0.4354838709677], 0.4308012487: [0.3870967741935, 0.6129032258065], 0.6056191467: [0.3225806451613, 0.6774193548387], 0.0949531738: [0.6612903225806, 0.3387096774194], 0.1324141519: [0.8870967741935, 0.1129032258065], 0.3738293444: [0.6935483870968, 0.3064516129032], 0.2414151925: [0.1290322580645, 0.8709677419355], 0.0301768991: [0.258064516129, 0.741935483871], 0.5088449532: [0.3225806451613, 0.6774193548387], 0.2112382934: [0.4193548387097, 0.5806451612903], 0.611082206: [0.6612903225806, 0.3387096774194], 0.0189906348: [0.6935483870968, 0.3064516129032], 0.196930281: [0.8870967741935, 0.1129032258065], 0.2528616025: [0.1935483870968, 0.8064516129032], 0.5598335068: [0.3870967741935, 0.6129032258065], 0.9916753382: [0.5161290322581, 0.4838709677419], 0.7013527575: [0.5161290322581, 0.4838709677419], 0.5309573361: [0.4032258064516, 0.5967741935484], 0.5257544225: [0.0806451612903, 
0.9193548387097], 0.0312174818: [0.0322580645161, 0.9677419354839], 0.9906347555: [0.9032258064516, 0.0967741935484], 0.8608220604: [0.758064516129, 0.241935483871], 0.3818938606: [0.1935483870968, 0.8064516129032], 0.3498959417: [0.1451612903226, 0.8548387096774], 0.4758064516: [0.5], 0.622528616: [0.0806451612903, 0.9193548387097], 0.3821540062: [0.1451612903226, 0.8548387096774], 0.8980228928: [0.1935483870968, 0.8064516129032], 0.7398543184: [0.258064516129, 0.741935483871], 0.4789281998: [0.1451612903226, 0.8548387096774], 0.8938605619: [0.9032258064516, 0.0967741935484], 0.1529656608: [0.5161290322581, 0.4838709677419], 0.657648283: [0.2903225806452, 0.7096774193548], 0.6318938606: [0.6935483870968, 0.3064516129032], 0.1644120708: [0.6451612903226, 0.3548387096774], 0.8543184183: [0.1290322580645, 0.8709677419355], 0.0447450572: [0.2903225806452, 0.7096774193548], 0.1789802289: [0.4193548387097, 0.5806451612903], 0.325962539: [0.8870967741935, 0.1129032258065], 0.2174817898: [0.5161290322581, 0.4838709677419], 0.0543704475: [0.758064516129, 0.241935483871], 0.5868886576: [0.1612903225806, 0.8387096774194], 0.4289802289: [0.0806451612903, 0.9193548387097], 0.6836628512: [0.1612903225806, 0.8387096774194], 0.0304370447: [0.6612903225806, 0.3387096774194], 0.4549947971: [0.8870967741935, 0.1129032258065], 0.5639958377: [0.1290322580645, 0.8709677419355], 0.8847554631: [0.0483870967742, 0.9516129032258], 0.8376690947: [0.0322580645161, 0.9677419354839], 0.0770031217: [0.2903225806452, 0.7096774193548], 0.1540062435: [0.3225806451613, 0.6774193548387], 0.0385015609: [0.1612903225806, 0.8387096774194], 0.3829344433: [0.0645161290323, 0.9354838709677], 0.9451092612: [0.0806451612903, 0.9193548387097], 0.4797086368: [0.0645161290323, 0.9354838709677], 0.6360561915: [0.5322580645161, 0.4677419354839], 0.4110301769: [0.5161290322581, 0.4838709677419], 0.540062435: [0.5161290322581, 0.4838709677419], 0.2185223725: [0.3225806451613, 0.6774193548387], 0.1480228928: 
[0.6935483870968, 0.3064516129032], 0.8178980229: [0.3870967741935, 0.6129032258065], 0.5390218522: [0.9032258064516, 0.0967741935484], 0.2580645161: [0.0], 0.1290322581: [0.0], 0.5941727367: [0.7741935483871, 0.2258064516129], 0.8501560874: [0.3870967741935, 0.6129032258065], 0.2125390219: [0.6935483870968, 0.3064516129032], 0.151925078: [0.9032258064516, 0.0967741935484], 0.9919354839: [0.5], 0.0967741935: [0.0], 0.1935483871: [0.0], 0.0241935484: [0.5], 0.5757023933: [0.1451612903226, 0.8548387096774], 0.5330385016: [0.8225806451613, 0.1774193548387], 0.5442247659: [0.0645161290323, 0.9354838709677], 0.4901144641: [0.1612903225806, 0.8387096774194], 0.5223725286: [0.1612903225806, 0.8387096774194], 0.0541103018: [0.4516129032258, 0.5483870967742], 0.0845473465: [0.5645161290323, 0.4354838709677], 0.2289281998: [0.6451612903226, 0.3548387096774], 0.930541103: [0.1451612903226, 0.8548387096774], 0.8688865765: [0.258064516129, 0.741935483871], 0.7325702393: [0.9032258064516, 0.0967741935484], 0.802289282: [0.0645161290323, 0.9354838709677], 0.8699271592: [0.0322580645161, 0.9677419354839], 0.7700312175: [0.0645161290323, 0.9354838709677], 0.5403225806: [0.5], 0.6943288241: [0.8225806451613, 0.1774193548387], 0.372528616: [0.4193548387097, 0.5806451612903], 0.790062435: [0.0161290322581, 0.9838709677419], 0.9523933403: [0.8225806451613, 0.1774193548387], 0.7463579605: [0.7903225806452, 0.2096774193548], 0.9107700312: [0.6290322580645, 0.3709677419355], 0.9708636837: [0.6451612903226, 0.3548387096774], 0.6277315297: [0.4032258064516, 0.5967741935484], 0.7023933403: [0.3225806451613, 0.6774193548387], 0.0137877211: [0.0483870967742, 0.9516129032258], 0.5850676379: [0.7903225806452, 0.2096774193548], 0.7804370447: [0.1612903225806, 0.8387096774194], 0.1886056191: [0.1451612903226, 0.8548387096774], 0.4869927159: [0.6451612903226, 0.3548387096774], 0.0783038502: [0.0483870967742, 0.9516129032258], 0.8335067638: [0.1935483870968, 0.8064516129032], 0.6964099896: 
[0.6935483870968, 0.3064516129032], 0.9604578564: [0.3225806451613, 0.6774193548387], 0.3582206035: [0.8870967741935, 0.1129032258065], 0.5143080125: [0.6612903225806, 0.3387096774194], 0.110301769: [0.7741935483871, 0.2258064516129], 0.4422476587: [0.9032258064516, 0.0967741935484], 0.1105619147: [0.0483870967742, 0.9516129032258], 0.441207076: [0.4516129032258, 0.5483870967742], 0.436264308: [0.8225806451613, 0.1774193548387], 0.935483871: [0.0], 0.6451612903: [0.0], 0.8605619147: [0.4516129032258, 0.5483870967742], 0.2892819979: [0.0322580645161, 0.9677419354839], 0.8629032258: [0.5], 0.8657648283: [0.1935483870968, 0.8064516129032], 0.5642559834: [0.0161290322581, 0.9838709677419], 0.3152965661: [0.3225806451613, 0.6774193548387], 0.0270551509: [0.1935483870968, 0.8064516129032], 0.5078043704: [0.5161290322581, 0.4838709677419], 0.2705515088: [0.2903225806452, 0.7096774193548], 0.6506243496: [0.2741935483871, 0.7258064516129], 0.052289282: [0.5645161290323, 0.4354838709677], 0.1386576483: [0.0806451612903, 0.9193548387097], 0.087408949: [0.9032258064516, 0.0967741935484], 0.9344432882: [0.0322580645161, 0.9677419354839], 0.4443288241: [0.3225806451613, 0.6774193548387], 0.6204474506: [0.6290322580645, 0.3709677419355], 0.6266909469: [0.0483870967742, 0.9516129032258], 0.2031737773: [0.0806451612903, 0.9193548387097], 0.8126951093: [0.1612903225806, 0.8387096774194], 0.535119667: [0.6935483870968, 0.3064516129032], 0.7981269511: [0.5161290322581, 0.4838709677419], 0.7669094693: [0.3225806451613, 0.6774193548387], 0.1841831426: [0.9032258064516, 0.0967741935484], 0.8212799168: [0.4032258064516, 0.5967741935484], 0.0280957336: [0.0645161290323, 0.9354838709677], 0.6027575442: [0.758064516129, 0.241935483871], 0.2853798127: [0.1451612903226, 0.8548387096774], 0.5067637877: [0.9032258064516, 0.0967741935484], 0.603798127: [0.5322580645161, 0.4677419354839], 0.2486992716: [0.9032258064516, 0.0967741935484], 0.5299167534: [0.0483870967742, 0.9516129032258], 
0.8587408949: [0.5645161290323, 0.4354838709677], 0.4914151925: [0.6290322580645, 0.3709677419355], 0.4612382934: [0.0806451612903, 0.9193548387097], 0.9492715921: [0.0483870967742, 0.9516129032258], 0.2427159209: [0.8225806451613, 0.1774193548387], 0.3444328824: [0.4516129032258, 0.5483870967742], 0.6298126951: [0.8225806451613, 0.1774193548387], 0.6607700312: [0.1290322580645, 0.8709677419355], 0.26144641: [0.8870967741935, 0.1129032258065], 0.1852237253: [0.5161290322581, 0.4838709677419], 0.7838189386: [0.0806451612903, 0.9193548387097], 0.8740894901: [0.6451612903226, 0.3548387096774], 0.9825702393: [0.4032258064516, 0.5967741935484], 0.9417273673: [0.1612903225806, 0.8387096774194], 0.7731529657: [0.0322580645161, 0.9677419354839], 0.3644640999: [0.0806451612903, 0.9193548387097], 0.4734651405: [0.4516129032258, 0.5483870967742], 0.0124869927: [0.2903225806452, 0.7096774193548], 0.2497398543: [0.5161290322581, 0.4838709677419], 0.4674817898: [0.0161290322581, 0.9838709677419], 0.0699791883: [0.2741935483871, 0.7258064516129], 0.7723725286: [0.6612903225806, 0.3387096774194], 0.6024973985: [0.4516129032258, 0.5483870967742], 0.3995837669: [0.2903225806452, 0.7096774193548], 0.493496358: [0.0806451612903, 0.9193548387097], 0.3204994797: [0.258064516129, 0.741935483871], 0.1602497399: [0.0322580645161, 0.9677419354839], 0.086628512: [0.758064516129, 0.241935483871], 0.314516129: [0.5], 0.4765868887: [0.3225806451613, 0.6774193548387], 0.8753902185: [0.7903225806452, 0.2096774193548], 0.0561914672: [0.5161290322581, 0.4838709677419], 0.2247658689: [0.0322580645161, 0.9677419354839], 0.9586368366: [0.5322580645161, 0.4677419354839], 0.3142559834: [0.5161290322581, 0.4838709677419], 0.604578564: [0.5161290322581, 0.4838709677419], 0.0033818939: [0.8870967741935, 0.1129032258065], 0.4435483871: [0.5], 0.6763787721: [0.0322580645161, 0.9677419354839], 0.5431841831: [0.1935483870968, 0.8064516129032], 0.6922476587: [0.4032258064516, 0.5967741935484], 0.296566077: 
[0.1612903225806, 0.8387096774194], 0.0741415193: [0.0806451612903, 0.9193548387097], 0.6753381894: [0.258064516129, 0.741935483871], 0.8636836629: [0.3225806451613, 0.6774193548387], 0.1612903226: [0.0], 0.1997918835: [0.1612903225806, 0.8387096774194], 0.0499479709: [0.4193548387097, 0.5806451612903], 0.6651925078: [0.5645161290323, 0.4354838709677], 0.1063995838: [0.0806451612903, 0.9193548387097], 0.5965140479: [0.0161290322581, 0.9838709677419], 0.1914672216: [0.258064516129, 0.741935483871], 0.4578563996: [0.1612903225806, 0.8387096774194], 0.820239334: [0.0483870967742, 0.9516129032258], 0.2258064516: [0.0], 0.1459417274: [0.8225806451613, 0.1774193548387], 0.680541103: [0.6451612903226, 0.3548387096774], 0.7252861603: [0.1290322580645, 0.8709677419355], 0.691207076: [0.0483870967742, 0.9516129032258], 0.6485431842: [0.8870967741935, 0.1129032258065], 0.7890218522: [0.4032258064516, 0.5967741935484], 0.0062434964: [0.1612903225806, 0.8387096774194], 0.0218522373: [0.4516129032258, 0.5483870967742], 0.8345473465: [0.0645161290323, 0.9354838709677], 0.5652965661: [0.8225806451613, 0.1774193548387], 0.7918834547: [0.4193548387097, 0.5806451612903], 0.8283038502: [0.4516129032258, 0.5483870967742], 0.5379812695: [0.4516129032258, 0.5483870967742], 0.8928199792: [0.4516129032258, 0.5483870967742], 0.3696670135: [0.4032258064516, 0.5967741935484], 0.9032258065: [0.0], 0.0169094693: [0.8225806451613, 0.1774193548387], 0.5733610822: [0.3225806451613, 0.6774193548387], 0.7273673257: [0.4193548387097, 0.5806451612903], 0.4089490114: [0.4516129032258, 0.5483870967742], 0.3184183143: [0.0645161290323, 0.9354838709677], 0.144640999: [0.1290322580645, 0.8709677419355], 0.3769510926: [0.758064516129, 0.241935483871], 0.4986992716: [0.4032258064516, 0.5967741935484], 0.7016129032: [0.5], 0.6399583767: [0.1935483870968, 0.8064516129032], 0.8899583767: [0.6935483870968, 0.3064516129032], 0.9854318418: [0.4193548387097, 0.5806451612903], 0.9409469303: [0.2741935483871, 
0.7258064516129], 0.4432882414: [0.5161290322581, 0.4838709677419], 0.7317898023: [0.758064516129, 0.241935483871], 0.3215400624: [0.0322580645161, 0.9677419354839], 0.3457336108: [0.5322580645161, 0.4677419354839], 0.1136836629: [0.8225806451613, 0.1774193548387], 0.151144641: [0.758064516129, 0.241935483871], 0.8865764828: [0.1290322580645, 0.8709677419355], 0.9752861603: [0.6290322580645, 0.3709677419355], 0.2809573361: [0.9032258064516, 0.0967741935484], 0.371748179: [0.8225806451613, 0.1774193548387], 0.9979188345: [0.258064516129, 0.741935483871], 0.01144641: [0.3870967741935, 0.6129032258065], 0.9399063476: [0.7903225806452, 0.2096774193548], 0.7315296566: [0.4516129032258, 0.5483870967742], 0.4747658689: [0.5322580645161, 0.4677419354839], 0.6305931322: [0.4193548387097, 0.5806451612903], 0.250780437: [0.3225806451613, 0.6774193548387], 0.9531737773: [0.4193548387097, 0.5806451612903], 0.5559313215: [0.6290322580645, 0.3709677419355], 0.0884495317: [0.5161290322581, 0.4838709677419], 0.1768990635: [0.1290322580645, 0.8709677419355], 0.6755983351: [0.6612903225806, 0.3387096774194], 0.2154006244: [0.4516129032258, 0.5483870967742], 0.7419354839: [0.0], 0.9388657648: [0.8870967741935, 0.1129032258065], 0.0437044745: [0.3870967741935, 0.6129032258065], 0.6035379813: [0.9032258064516, 0.0967741935484], 0.9066077003: [0.8870967741935, 0.1129032258065], 0.6672736733: [0.758064516129, 0.241935483871], 0.0603537981: [0.0645161290323, 0.9354838709677], 0.1709157128: [0.0806451612903, 0.9193548387097], 0.337408949: [0.4032258064516, 0.5967741935484], 0.2094172737: [0.0161290322581, 0.9838709677419], 0.0075442248: [0.6290322580645, 0.3709677419355], 0.3038501561: [0.7741935483871, 0.2258064516129], 0.7637877211: [0.4516129032258, 0.5483870967742], 0.4716441207: [0.5645161290323, 0.4354838709677], 0.2354318418: [0.0806451612903, 0.9193548387097], 0.9263787721: [0.5322580645161, 0.4677419354839], 0.2978668054: [0.6290322580645, 0.3709677419355], 0.9555150885: 
[0.5645161290323, 0.4354838709677], 0.7075962539: [0.258064516129, 0.741935483871], 0.3093132154: [0.6935483870968, 0.3064516129032], 0.1082206035: [0.3870967741935, 0.6129032258065], 0.2164412071: [0.9032258064516, 0.0967741935484], 0.5546305931: [0.1612903225806, 0.8387096774194], 0.1279916753: [0.0322580645161, 0.9677419354839], 0.4268990635: [0.6290322580645, 0.3709677419355], 0.7898022893: [0.1290322580645, 0.8709677419355], 0.0158688866: [0.0161290322581, 0.9838709677419], 0.0707596254: [0.1612903225806, 0.8387096774194], 0.2104578564: [0.8225806451613, 0.1774193548387], 0.2799167534: [0.4516129032258, 0.5483870967742], 0.6264308012: [0.7741935483871, 0.2258064516129], 0.21566077: [0.758064516129, 0.241935483871], 0.2372528616: [0.3870967741935, 0.6129032258065], 0.3059313215: [0.1290322580645, 0.8709677419355], 0.2739334027: [0.0161290322581, 0.9838709677419], 0.0957336108: [0.0322580645161, 0.9677419354839], 0.8306451613: [0.5], 0.3779916753: [0.5322580645161, 0.4677419354839], 0.2999479709: [0.0806451612903, 0.9193548387097], 0.1022372529: [0.2741935483871, 0.7258064516129], 0.121748179: [0.3225806451613, 0.6774193548387], 0.3787721124: [0.5161290322581, 0.4838709677419], 0.9253381894: [0.758064516129, 0.241935483871], 0.4029656608: [0.0161290322581, 0.9838709677419], 0.185483871: [0.5], 0.4755463059: [0.5161290322581, 0.4838709677419], 0.8886576483: [0.4193548387097, 0.5806451612903], 0.8941207076: [0.5322580645161, 0.4677419354839], 0.6173257024: [0.7903225806452, 0.2096774193548], 0.8691467222: [0.6612903225806, 0.3387096774194], 0.2559833507: [0.258064516129, 0.741935483871], 0.5319979188: [0.0161290322581, 0.9838709677419], 0.2819979188: [0.5161290322581, 0.4838709677419], 0.4786680541: [0.1935483870968, 0.8064516129032]}
|
#!/usr/bin/python
from optparse import OptionParser
import re
import os, sys
def usage(argv):
    """Exit with a usage message unless exactly two arguments were given.

    Expected argv: [program, wordlist_file, directory].
    """
    if len(argv) != 3:
        print "Error: \n Usage: " + argv[0] + " wordlist directory \n"
        sys.exit(1)
def ReadFromDir(directory):
    """Return the paths of all non-directory entries directly inside *directory*."""
    return [
        os.path.join(directory, entry)
        for entry in os.listdir(directory)
        if not os.path.isdir(os.path.join(directory, entry))
    ]
# Returns the contents of a file as a string
def load_file_contents(fname):
    """Read *fname* and return its entire contents as a string."""
    # A context manager guarantees the handle is closed even if read()
    # raises; the previous open/read/close sequence leaked on error.
    with open(fname, "r") as f:
        return f.read()
# Loads a file containing the words to be found in each
# document. Returns a list of words.
def load_word_list(fname):
    """Load *fname* and return its non-empty lines as a list of words."""
    contents = load_file_contents(fname)
    # filter(None, ...) drops the empty strings produced by blank lines
    # and the trailing newline.
    return list(filter(None, contents.split("\n")))
def extractUtt(moveline):
    """Return the utterance field of a move line, whitespace-stripped.

    Lines have the form ``move,time1,time2,utterance``; only the fourth
    comma-separated column is returned.
    """
    fields = moveline.split(",")
    # strip() covers the original rstrip()-then-strip() sequence.
    return fields[3].strip()
def splitUtt(moveline):
    """Split a move line into its comma-separated fields."""
    return moveline.split(",")
def replaceText(tempfilepath, landmarks, outfilepath):
    """Rewrite *tempfilepath* into *outfilepath*, replacing each utterance
    word found in *landmarks* with the token "[landmark]".

    Input lines have the form ``move,time1,time2,utterance``; the first
    three columns are copied through unchanged.
    """
    # Context managers close both files even on error: the original
    # leaked the input handle and re-closed the output inside `with`.
    with open(tempfilepath, 'r') as infile, open(outfilepath, 'w') as outfile:
        for line in infile:
            # Split once per line (the original split every line twice,
            # via extractUtt and splitUtt).
            fields = line.split(",")
            utterance = fields[3].strip()
            outfile.write(str(fields[0]) + "," + str(fields[1]) + "," + str(fields[2]) + ", ")
            for word in utterance.split():
                if word in landmarks:
                    outfile.write("[landmark] ")
                else:
                    outfile.write(word + " ")
            outfile.write("\n")
if __name__ == "__main__":
    usage(sys.argv)
    wordlist = sys.argv[1]
    directory = sys.argv[2]
    files = ReadFromDir(directory)
    # Load the landmark words from the wordlist file once.
    mylist = load_word_list(wordlist)
    for i in files:
        outfile = i + ".LM"
        # Fix: pass the loaded word *list* (mylist), not the wordlist file
        # path. The old call made `word in landmarks` a substring test
        # against the path string, and `mylist` was never used.
        replaceText(i, mylist, outfile)
|
"""MicroESP Errors
"""
__author__ = 'Oleksandr Shepetko'
__email__ = 'a@shepetko.com'
__license__ = 'MIT'
class ESP8266Error(Exception):
    """Base class for all MicroESP/ESP8266 related errors."""
    pass
class DeviceNotConnectedError(ESP8266Error):
    """Raised when an operation requires a device connection that is absent."""
    pass
class DeviceCodeExecutionError(ESP8266Error):
    """Raised when code sent to the device fails to execute."""
    pass
|
import os
import pytest
from api_flow.config import Config
from api_flow.flow import Flow
from api_flow.step import Step
from unittest.mock import patch
@pytest.fixture(autouse=True)
def setup():
    # Point the Config at the fixture data shipped next to this test file,
    # so every test in the module reads from test_data/.
    Config.data_path = os.path.join(os.path.dirname(__file__), 'test_data')
@pytest.fixture
def mock_step_execute():
    # Patch Step.execute so flows "run" without doing real work; tests may
    # flip return_value to simulate a failing step.
    with patch.object(Step, 'execute') as mock_step_execute:
        mock_step_execute.return_value = True
        yield mock_step_execute
class TestFlow:
    """Exercises Flow construction, dependency resolution and execution."""

    def test___init__(self):
        subject = Flow('multiple_dependencies', profile='foo', profiles=['bar', 'baz'])
        assert subject.flow_name == 'multiple_dependencies'
        assert isinstance(subject.flow_store.multiple_dependencies, Flow)
        assert len(subject.flow_dependencies) == 2
        assert subject.flow_description == 'multiple_dependencies'

    def test_single_dependency(self):
        subject = Flow('single_dependency')
        assert subject.flow_definition.depends_on == 'something_else'
        assert subject.flow_description == 'my single-dependency flow'
        assert len(subject.flow_dependencies) == 1
        assert subject.flow_dependencies[0] == 'something_else'

    def test_execute(self, mock_step_execute):
        subject = Flow('single_dependency')
        assert subject.execute()
        assert subject.flow_dependencies_succeeded
        assert subject.flow_steps_succeeded

    def test_execute_dependencies_fail(self, mock_step_execute):
        subject = Flow('single_dependency')
        subject.flow_store.current_step = Step('bogus_step', {})
        subject.flow_dependencies_succeeded = False
        assert not subject.execute()

    def test_execute_steps_fail(self, mock_step_execute):
        subject = Flow('single_dependency')
        subject.flow_dependencies_succeeded = True
        mock_step_execute.return_value = False
        assert not subject.execute()

    def test_empty_flow_succeeds(self):
        subject = Flow('empty')
        assert subject.execute()
|
from dataclasses import dataclass
from enum import Enum, auto
from functools import cache
class TokenType(Enum):
    """Token categories produced by the lexer.

    Operator members carry their literal spelling (a string) as the enum
    value; literal-token members use ``auto()`` (an int), which excludes
    them from ``first_characters``.
    """
    # Two or more character tokens and operators
    DEFINE = "="
    EQUALS = "=="
    NOT_EQUALS = "!="
    SMALLER_OR_EQUAL_THAN = "<="
    GREATER_OR_EQUAL_THAN = ">="
    RANGE = ".."
    # Single-character tokens and operators
    LEFT_PAREN = "("
    RIGHT_PAREN = ")"
    LEFT_BRACE = "{"
    RIGHT_BRACE = "}"
    LEFT_BRACKET = "["
    RIGHT_BRACKET = "]"
    LEFT_POINTY_BRACKET = "<"
    RIGHT_POINTY_BRACKET = ">"
    COMMA = ","
    DOT = "."
    PLUS = "+"
    MINUS = "-"
    SLASH = "/"
    STAR = "*"
    CARET = "^"
    COLON = ":"
    SEMICOLON = ";"
    # NOTE(review): SMALLER_THAN/GREATER_THAN reuse the values of
    # LEFT_POINTY_BRACKET/RIGHT_POINTY_BRACKET, so Enum makes them
    # *aliases* of those members — confirm this is intended.
    SMALLER_THAN = "<"
    GREATER_THAN = ">"
    # Literal tokens
    INTEGER_LITERAL = auto()
    FLOAT_LITERAL = auto()
    STRING_LITERAL = auto()
    IDENTIFIER_LITERAL = auto()
    BOOLEAN_LITERAL = auto()
    KEYWORD = auto()

    @staticmethod
    @cache
    def first_characters() -> list[str]:
        """Return the first character of every string-valued member.

        Aliases are included via ``__members__``, so the list may contain
        duplicates (e.g. "<" appears for both names sharing that value).
        """
        return [
            str(t.value)[0] for t in TokenType.__members__.values() if not isinstance(t.value, int)
        ]
@dataclass
class Token:
    """A single lexical token together with its source position."""
    typ: TokenType
    image: str
    line: int
    column: int

    def match(self, *types: TokenType):
        """True when this token's type is any of the given alternatives."""
        return self.typ in types

    def __str__(self):
        return "Token(type=<{0}>, image=`{1}`, position=[{2}:{3}])".format(
            self.typ.name, self.image, self.line, self.column)

    def __repr__(self):
        return str(self)

    def source_position(self) -> str:
        """Position rendered as ``line:column``."""
        return "{0}:{1}".format(self.line, self.column)
|
from .callhub import CallHub
|
"""Definition of an ElkM1 Area"""
from .const import Max, TextDescriptions
from .elements import Element, Elements
from .message import add_message_handler, as_encode, al_encode, dm_encode
class Area(Element):
    """Represents a single ElkM1 area (alarm partition)."""

    def __init__(self, index, elk):
        super().__init__(index, elk)
        # Status fields are unknown until the panel reports them.
        self.armed_status = None
        self.arm_up_state = None
        self.alarm_state = None
        self.is_exit = False
        self.timer1 = 0
        self.timer2 = 0

    def arm(self, level, code):
        """(Helper) Arm system at specified level (away, vacation, etc)"""
        message = al_encode(level, self._index, code)
        self._elk.send(message)

    def disarm(self, code):
        """(Helper) Disarm system."""
        # Disarm is simply arm level 0.
        self.arm(0, code)

    def display_message(self, clear, beep, timeout, line1, line2):
        """Display a message on all of the keypads in this area."""
        message = dm_encode(self._index, clear, beep, timeout, line1, line2)
        self._elk.send(message)
class Areas(Elements):
    """Container managing every Area element on the panel."""

    def __init__(self, elk):
        super().__init__(elk, Area, Max.AREAS.value)
        # Arm-status ('AS') and entry/exit ('EE') reports update our areas.
        add_message_handler('AS', self._as_handler)
        add_message_handler('EE', self._ee_handler)

    def sync(self):
        """Retrieve areas from ElkM1"""
        # NOTE(review): uses self.elk while Area uses self._elk — confirm
        # Elements exposes both names.
        self.elk.send(as_encode())
        self.get_descriptions(TextDescriptions.AREA.value)

    def _as_handler(self, armed_statuses, arm_up_states, alarm_states):
        # One status triple per area; only the final setattr signals change.
        for area in self.elements:
            idx = area.index
            area.setattr('armed_status', armed_statuses[idx], False)
            area.setattr('arm_up_state', arm_up_states[idx], False)
            area.setattr('alarm_state', alarm_states[idx], True)

    # pylint: disable=too-many-arguments
    def _ee_handler(self, area, is_exit, timer1, timer2, armed_status):
        target = self.elements[area]
        target.setattr('armed_status', armed_status, False)
        target.setattr('timer1', timer1, False)
        target.setattr('timer2', timer2, False)
        target.setattr('is_exit', is_exit, True)
|
#!/usr/bin/env python
import numpy as np
#############################
## DEFAULT PARAMETER GRIDS ##
#############################
# Log-spaced grid of candidate diffusion coefficients.
DEFAULT_DIFF_COEFS = np.logspace(-2.0, 2.0, 100)
# Linear grid of candidate localization errors.
DEFAULT_LOC_ERRORS = np.arange(0.0, 0.072, 0.002)
# Linear grid of candidate Hurst parameters.
DEFAULT_HURST_PARS = np.arange(0.05, 1.0, 0.05)
###############################################
## DEFAULT PREPROCESSING AND HYPERPARAMETERS ##
###############################################
# When trajectories are too long, split them into smaller trajectories.
# *splitsize* defines the maximum trajectory length (in # jumps) before
# splitting.
DEFAULT_SPLITSIZE = 10
# Default concentration parameter for the prior distribution over state
# occupations.
DEFAULT_CONC_PARAM = 1.0
# Default number of iterations to do when inferring posterior
DEFAULT_MAX_ITER = 200
# Default first frame to consider
DEFAULT_START_FRAME = 0
# Maximum number of trajectories to consider when running state arrays
DEFAULT_SAMPLE_SIZE = 10000
################################
## DETECTION-LEVEL ATTRIBUTES ##
################################
# Column in the detections DataFrame encoding frame index
FRAME = "frame"
# Column in the detections DataFrame encoding trajectory index
TRACK = "trajectory"
# Column in the detections DataFrame encoding trajectory length (in frames)
TRACK_LENGTH = "track_length"
# Column in the detections DataFrame encoding y-position in pixels
PY = "y"
# Column in the detections DataFrame encoding x-position in pixels
PX = "x"
###########################
## JUMP-LEVEL ATTRIBUTES ##
###########################
# Column in the jumps DataFrame encoding number of frames over which
# the jump happened
DFRAMES = "dframes"
# Column in the jumps DataFrame encoding the change in y-position in microns
DY = "dy"
# Column in the jumps DataFrame encoding the change in x-position in microns
DX = "dx"
# Column in the jumps DataFrame encoding the squared 2D radial jump length
# in squared microns
DR2 = "dr2"
# Column in the jumps DataFrame encoding the number of jumps per trajectory
JUMPS_PER_TRACK = "jumps_per_track"
####################################
## AVAILABLE LIKELIHOOD FUNCTIONS ##
####################################
# Names of likelihood functions
RBME = "rbme"
RBME_MARGINAL = "rbme_marginal"
GAMMA = "gamma"
FBME = "fbme"
# All available likelihood functions
LIKELIHOOD_TYPES = [RBME, RBME_MARGINAL, GAMMA, FBME]
###########
## OTHER ##
###########
# Bucket condition column for StateArrayDatasets without an experimental condition
DEFAULT_CONDITION_COL = "default_condition"
DEFAULT_CONDITION = "no_condition"
|
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: drop the obsolete ``webhook_payload`` column from Membership."""
    if not frappe.db.exists("DocType", "Membership"):
        return
    if 'webhook_payload' in frappe.db.get_table_columns("Membership"):
        frappe.db.sql("alter table `tabMembership` drop column webhook_payload")
|
# test_getopt.py
# David Goodger <dgoodger@bigfoot.com> 2000-08-19
import getopt
from getopt import GetoptError
from test_support import verbose
def expectException(teststr, expected, failure=AssertionError):
    """Executes a statement passed in teststr, and raises an exception
    (failure) if the expected exception is *not* raised."""
    # NOTE: Python 2 exec statement; teststr runs in this function's
    # namespace, so test snippets may bind names like `tmp` freely.
    try:
        exec teststr
    except expected:
        pass
    else:
        raise failure
if verbose:
    print 'Running tests on getopt.short_has_arg'
assert getopt.short_has_arg('a', 'a:')
assert not getopt.short_has_arg('a', 'a')
expectException("tmp = getopt.short_has_arg('a', 'b')", GetoptError)
expectException("tmp = getopt.short_has_arg('a', '')", GetoptError)
if verbose:
    print 'Running tests on getopt.long_has_args'
has_arg, option = getopt.long_has_args('abc', ['abc='])
assert has_arg
assert option == 'abc'
has_arg, option = getopt.long_has_args('abc', ['abc'])
assert not has_arg
assert option == 'abc'
# Unique-prefix matching: 'abc' unambiguously selects 'abcd'.
has_arg, option = getopt.long_has_args('abc', ['abcd'])
assert not has_arg
assert option == 'abcd'
expectException("has_arg, option = getopt.long_has_args('abc', ['def'])",
                GetoptError)
expectException("has_arg, option = getopt.long_has_args('abc', [])",
                GetoptError)
# Ambiguous prefix ('abc' matches both candidates) must raise.
expectException("has_arg, option = " + \
                "getopt.long_has_args('abc', ['abcd','abcde'])",
                GetoptError)
if verbose:
    print 'Running tests on getopt.do_shorts'
opts, args = getopt.do_shorts([], 'a', 'a', [])
assert opts == [('-a', '')]
assert args == []
opts, args = getopt.do_shorts([], 'a1', 'a:', [])
assert opts == [('-a', '1')]
assert args == []
#opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
#assert opts == [('-a', '1')]
#assert args == []
opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
assert opts == [('-a', '1')]
assert args == []
opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
assert opts == [('-a', '1')]
assert args == ['2']
expectException("opts, args = getopt.do_shorts([], 'a1', 'a', [])",
                GetoptError)
expectException("opts, args = getopt.do_shorts([], 'a', 'a:', [])",
                GetoptError)
if verbose:
    print 'Running tests on getopt.do_longs'
opts, args = getopt.do_longs([], 'abc', ['abc'], [])
assert opts == [('--abc', '')]
assert args == []
opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
assert opts == [('--abc', '1')]
assert args == []
opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
assert opts == [('--abcd', '1')]
assert args == []
expectException("opts, args = getopt.do_longs([], 'abc=1', ['abc'], [])",
                GetoptError)
expectException("opts, args = getopt.do_longs([], 'abc', ['abc='], [])",
                GetoptError)
# note: the empty string between '-a' and '--beta' is significant:
# it simulates an empty string option argument ('-a ""') on the command line.
cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a', '',
           '--beta', 'arg1', 'arg2']
if verbose:
    print 'Running tests on getopt.getopt'
opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
assert opts == [('-a', '1'), ('-b', ''), ('--alpha', '2'), ('--beta', ''),
                ('-a', '3'), ('-a', ''), ('--beta', '')]
# Note ambiguity of ('-b', '') and ('-a', '') above. This must be
# accounted for in the code that calls getopt().
assert args == ['arg1', 'arg2']
expectException(
    "opts, args = getopt.getopt(cmdline, 'a:b', ['alpha', 'beta'])",
    GetoptError)
if verbose:
    print "Module getopt: tests completed successfully."
|
from rlagent.noises.ounoise import OUNoise
|
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
class PolicyApplication(object):
    def __init__(self):
        """
        Attributes:
            swaggerTypes (dict): The key is attribute name and the value is attribute type.
            attributeMap (dict): The key is attribute name and the value is json key in definition.
        """
        # Swagger type of each attribute.
        self.swaggerTypes = {
            'raw': 'str',
            'trafficClass': 'str',
            'stale': 'bool',
            'id': 'str',
            'appName': 'str',
        }
        # JSON key for each attribute — an identity mapping here.
        self.attributeMap = {name: name for name in self.swaggerTypes}
        # Either raw Application of the form port:protocol should be
        # specified or appId should be specified.
        self.raw = None  # str
        # Traffic class to which the app belongs.
        self.trafficClass = None  # str
        # Whether the application has been updated since this policy was
        # last provisioned.
        self.stale = None  # bool
        self.id = None  # str
        self.appName = None  # str
|
from django.http import HttpResponse
def index(request):
    """Render the site landing page."""
    return HttpResponse("<h1>welcome to my site</h1>")
|
import platform
import os
def mkdir(path):
    """Create *path* (with parents) if missing; return True if it was created."""
    if os.path.exists(path):
        return False
    os.makedirs(path)
    return True
def mkfile(filePath):
    """Write the aliyun pip-mirror config to *filePath*, asking before overwrite."""
    pipfile = ("[global]\ntrusted-host=mirrors.aliyun.com\n"
               "index-url=http://mirrors.aliyun.com/pypi/simple/")
    if os.path.exists(filePath):
        # Ask interactively before clobbering an existing config.
        answer = str(input("File exist!Cover?(Y/N))")).upper()
        if answer == 'N':
            print("Not Cover.")
            return
    with open(filePath, 'w') as fp:
        fp.write(pipfile)
    print("Write finish.")
def change_pypi_source():
    """Create the per-user pip config pointing at the aliyun mirror."""
    systype = platform.system()
    print("System type: " + systype)
    # Pick the platform-specific config directory and filename.
    if systype == "Windows":
        conf_dir = os.path.join(os.getenv('HOMEPATH'), 'pip')
        conf_name = 'pip.ini'
    elif systype in ("Linux", "Darwin"):
        conf_dir = os.path.join(os.path.expandvars('$HOME'), ".pip")
        conf_name = 'pip.conf'
    else:
        print("System type: " + systype + " Not Support!")
        return
    mkdir(conf_dir)
    mkfile(os.path.join(conf_dir, conf_name))
|
from rect import Rect
class Bomb(Rect):
    # Class-level registry of every Bomb ever constructed. Instances are
    # appended here and never removed by this class, so removal must be
    # handled elsewhere.
    Bombs = []

    def __init__(self, x, y):
        # Frame counter; intentionally set before the Rect initialiser runs.
        self.total_frames = 0
        # Bombs are a fixed 32x32 rectangle at (x, y).
        super(Bomb, self).__init__(x, y, 32, 32)
        Bomb.Bombs.append(self)
|
import pytest
@pytest.fixture
def mock_execution_context(mocker):
    """Execution-context stub with database/model-version parameters preset."""
    context = mocker.Mock()
    context.parameters.database = "test_database"
    context.parameters.model_version_id = 12345
    return context
@pytest.fixture
def mock_ezfuncs(mocker):
    # Replace the cascade DB-access module wholesale so no real database
    # connections are ever opened by tests.
    return mocker.patch("cascade.core.db.ezfuncs")
@pytest.fixture
def mock_database_access(mock_ezfuncs):
    """Cursor/connection pair taken from the mocked ezfuncs module.

    Fix: the original called ``get_connetion()`` (typo) for the connection,
    which created a *different* mock attribute, so the returned connection
    did not correspond to the cursor's connection.
    """
    connection = mock_ezfuncs.get_connection()
    return {"cursor": connection.cursor(), "connection": connection}
|
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import subprocess
import os
import collections
import logging
import tempfile
import six
import pkg_resources
from yardstick import ssh
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts.base import Context
from yardstick.common.constants import ANSIBLE_DIR, YARDSTICK_ROOT_PATH
from yardstick.common.ansible_common import AnsibleCommon
from yardstick.common.exceptions import ContextUpdateCollectdForNodeError
LOG = logging.getLogger(__name__)
DEFAULT_DISPATCH = 'script'
class NodeContext(Context):
    """Class that handle nodes info"""

    __context_type__ = contexts.CONTEXT_NODE

    def __init__(self):
        self.file_path = None
        self.nodes = []
        self.networks = {}
        self.controllers = []
        self.computes = []
        self.baremetals = []
        self.env = {}
        self.attrs = {}
        # Maps the 'env.type' setting to the deploy/undeploy strategy.
        self.DISPATCH_TYPES = {
            "ansible": self._dispatch_ansible,
            "script": self._dispatch_script,
        }
        super(NodeContext, self).__init__()

    def init(self, attrs):
        """initializes itself from the supplied arguments"""
        super(NodeContext, self).init(attrs)
        cfg = self.read_pod_file(attrs)
        self.env = attrs.get('env', {})
        self.attrs = attrs
        LOG.debug("Env: %r", self.env)
        # add optional static network definition
        self.networks.update(cfg.get("networks", {}))

    def deploy(self):
        """Run the configured 'setup' steps via script or ansible."""
        config_type = self.env.get('type', DEFAULT_DISPATCH)
        self.DISPATCH_TYPES[config_type]("setup")

    def undeploy(self):
        """Run the configured 'teardown' steps, then the base undeploy."""
        config_type = self.env.get('type', DEFAULT_DISPATCH)
        self.DISPATCH_TYPES[config_type]("teardown")
        super(NodeContext, self).undeploy()

    def _dispatch_script(self, key):
        """Execute each {host: info} script step configured under env[key]."""
        steps = self.env.get(key, [])
        for step in steps:
            for host, info in step.items():
                self._execute_script(host, info)

    def _dispatch_ansible(self, key):
        """Run the playbooks configured under env[key], if any."""
        try:
            playbooks = self.env[key]
        except KeyError:
            # Nothing configured for this phase; silently skip.
            pass
        else:
            self._do_ansible_job(playbooks)

    def _do_ansible_job(self, playbooks):
        self.ansible_exec = AnsibleCommon(nodes=self.nodes,
                                          test_vars=self.env)
        # playbooks relative to ansible dir
        # playbooks can also be a list of playbooks
        self.ansible_exec.gen_inventory_ini_dict()
        if isinstance(playbooks, six.string_types):
            playbooks = [playbooks]
        playbooks = [self.fix_ansible_path(playbook) for playbook in playbooks]
        tmpdir = tempfile.mkdtemp(prefix='ansible-')
        self.ansible_exec.execute_ansible(playbooks, tmpdir,
                                          verbose=self.env.get("verbose",
                                                               False))

    def fix_ansible_path(self, playbook):
        if not os.path.isabs(playbook):
            # make relative paths absolute in ANSIBLE_DIR
            playbook = os.path.join(ANSIBLE_DIR, playbook)
        return playbook

    def _get_physical_nodes(self):
        return self.nodes

    def _get_physical_node_for_server(self, server_name):
        node_name, context_name = self.split_host_name(server_name)
        if context_name is None or self.name != context_name:
            return None
        for n in (n for n in self.nodes if n["name"] == node_name):
            return "{}.{}".format(n["name"], self._name)
        return None

    def update_collectd_options_for_node(self, options, attr_name):
        """Attach collectd *options* to the node named in *attr_name*.

        Raises:
            ContextUpdateCollectdForNodeError: if no node matches.
        """
        node_name, _ = self.split_host_name(attr_name)
        matching_nodes = (n for n in self.nodes if n["name"] == node_name)
        try:
            node = next(matching_nodes)
        except StopIteration:
            raise ContextUpdateCollectdForNodeError(attr_name=attr_name)
        node["collectd"] = options

    def _get_server(self, attr_name):
        """lookup server info by name from context
        attr_name: a name for a server listed in nodes config file
        """
        node_name, name = self.split_host_name(attr_name)
        if name is None or self.name != name:
            return None
        matching_nodes = (n for n in self.nodes if n["name"] == node_name)
        try:
            # A clone is created in order to avoid affecting the
            # original one.
            node = dict(next(matching_nodes))
        except StopIteration:
            return None
        try:
            duplicate = next(matching_nodes)
        except StopIteration:
            pass
        else:
            raise ValueError("Duplicate nodes!!! Nodes: %s %s" %
                             (node, duplicate))
        node["name"] = attr_name
        node.setdefault("interfaces", {})
        return node

    def _get_network(self, attr_name):
        # NOTE(review): collections.Mapping was removed in Python 3.10+
        # (collections.abc.Mapping); kept as-is for py2/six compatibility.
        if not isinstance(attr_name, collections.Mapping):
            network = self.networks.get(attr_name)
        else:
            # Don't generalize too much Just support vld_id
            vld_id = attr_name.get('vld_id', {})
            # for node context networks are dicts
            iter1 = (n for n in self.networks.values() if n.get('vld_id') == vld_id)
            network = next(iter1, None)
        if network is None:
            return None
        result = {
            # name is required
            "name": network["name"],
            "vld_id": network.get("vld_id"),
            "segmentation_id": network.get("segmentation_id"),
            "network_type": network.get("network_type"),
            "physical_network": network.get("physical_network"),
        }
        return result

    def _execute_script(self, node_name, info):
        if node_name == 'local':
            self._execute_local_script(info)
        else:
            self._execute_remote_script(node_name, info)

    def _execute_remote_script(self, node_name, info):
        prefix = self.env.get('prefix', '')
        script, options = self._get_script(info)
        script_file = pkg_resources.resource_filename(prefix, script)
        self._get_client(node_name)
        self.client._put_file_shell(script_file, '~/{}'.format(script))
        cmd = 'sudo bash {} {}'.format(script, options)
        status, _, stderr = self.client.execute(cmd)
        if status:
            raise RuntimeError(stderr)

    def _execute_local_script(self, info):
        script, options = self._get_script(info)
        script = os.path.join(YARDSTICK_ROOT_PATH, script)
        cmd = ['bash', script, options]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        LOG.debug('\n%s', p.communicate()[0])

    def _get_script(self, info):
        return info.get('script'), info.get('options', '')

    def _get_client(self, node_name):
        node = self._get_node_info(node_name.strip())
        if node is None:
            raise SystemExit('No such node')
        self.client = ssh.SSH.from_node(node, defaults={'user': 'ubuntu'})
        self.client.wait(timeout=600)

    def _get_node_info(self, name):
        # Fix: supply a default so an unknown node yields None — which
        # _get_client explicitly checks for — instead of leaking a raw
        # StopIteration out of next().
        return next((n for n in self.nodes if n['name'].strip() == name), None)
|
from unittest import TestCase
from packaging.version import Version
from packaging.specifiers import SpecifierSet
from pyrrot import Pyrrot
class TestIsOld(TestCase):
    """Checks Pyrrot.is_old against a 'latest' release of 2.0.0."""

    def setUp(self):
        self.latest = Version('2.0.0')

    def test_equals(self):
        specs = SpecifierSet('==1.0.0')
        self.assertTrue(Pyrrot.is_old(self.latest, specs))
        pinned = Version('1.0.0')
        self.assertFalse(Pyrrot.is_old(pinned, specs))

    def test_less_than(self):
        self.assertTrue(Pyrrot.is_old(self.latest, SpecifierSet('<1.0.0')))
        self.assertTrue(Pyrrot.is_old(self.latest, SpecifierSet('<2.0.0')))
        self.assertFalse(Pyrrot.is_old(self.latest, SpecifierSet('<3.0.0')))

    def test_less_or_equal(self):
        self.assertTrue(Pyrrot.is_old(self.latest, SpecifierSet('<=1.0.0')))
        self.assertFalse(Pyrrot.is_old(self.latest, SpecifierSet('<=2.0.0')))

    def test_greaters(self):
        self.assertFalse(Pyrrot.is_old(self.latest, SpecifierSet('>1.0.0')))
        # FIXME: latest is older than our requirements
        self.assertFalse(Pyrrot.is_old(self.latest, SpecifierSet('>2.0.0')))
        self.assertFalse(Pyrrot.is_old(self.latest, SpecifierSet('>=1.0.0')))
        self.assertFalse(Pyrrot.is_old(self.latest, SpecifierSet('>=2.0.0')))
|
from abaqusConstants import *
class OdbDataFrame:
    """The OdbDataFrame object.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import visualization
        session.odbData[name].steps[i].frames[i]
    """

    def setValues(self, activateFrame: Boolean, update: Boolean = OFF):
        """This method modifies the OdbDataFrame object.

        Parameters
        ----------
        activateFrame
            A Boolean specifying whether to activate the frame.
        update
            A Boolean specifying whether to update the model. The default value is ON.
        """
        # Stub: implemented by the Abaqus kernel; intentionally a no-op here.
        pass
|
from fastapi import FastAPI
from routes.student import student_router
import uvicorn
import os
app = FastAPI()

# Register routes (run with: uvicorn main:app --reload)
app.include_router(student_router)

if __name__ == '__main__':
    host = "127.0.0.1"
    port = 5000
    docs_url = "http://" + host + ":" + str(port) + "/docs"
    # Open the interactive API docs in the default browser (Windows 'start').
    os.system('start "" ' + docs_url)
    uvicorn.run("main:app", host=host, port=port, reload=True)
|
import pytest
from pyschieber.rules.count_rules import counting_factor
from pyschieber.deck import Deck
from pyschieber.trumpf import Trumpf
from pyschieber.player.random_player import RandomPlayer
from pyschieber.game import Game, get_player_index
from pyschieber.team import Team
@pytest.mark.parametrize("start_key, last_key", [
    (0, 3),
    (1, 0),
    (2, 1),
    (3, 2),
])
def test_get_player_key(start_key, last_key):
    """The generator yields the three other players, ending on last_key."""
    visited = list(get_player_index(start_key))
    assert len(visited) == 3
    assert visited[-1] == last_key
def test_game():
    """A full game leaves every player with zero cards in hand."""
    random_players = [RandomPlayer(name=i) for i in range(4)]
    team_1 = Team(players=[random_players[0], random_players[1]])
    # Fix: the second team must be the remaining two players; previously
    # player 1 sat on both teams and player 3 on none.
    team_2 = Team(players=[random_players[2], random_players[3]])
    teams = [team_1, team_2]
    game = Game(teams=teams, point_limit=1500)
    game.play()
    for player in random_players:
        assert len(player.cards) == 0
@pytest.mark.parametrize("start_key, next_key", [
    (0, 1),
    (1, 2),
    (2, 3),
    (3, 0),
])
def test_get_player_index(start_key, next_key):
    """The first yielded index is the player seated after start_index."""
    assert next(get_player_index(start_index=start_key)) == next_key
@pytest.mark.parametrize("trumpf", list(Trumpf)[:6])
def test_add_points(trumpf):
    """Round points are multiplied by the trumpf counting factor when enabled."""
    round_points = 152
    deck = Deck()
    random_players = [RandomPlayer(name=i) for i in range(4)]
    team_1 = Team(players=[random_players[0], random_players[1]])
    # Fix: the second team must be the remaining two players; previously
    # player 1 sat on both teams and player 3 on none.
    team_2 = Team(players=[random_players[2], random_players[3]])
    teams = [team_1, team_2]
    game = Game(teams=teams, use_counting_factor=True)
    game.trumpf = trumpf
    game.add_points(team_index=0, cards=deck.cards, last=False)
    assert team_1.points == round_points * counting_factor[trumpf]
    game.use_counting_factor = False
    game.add_points(team_index=1, cards=deck.cards, last=False)
    assert team_2.points == round_points
|
import PILasOPENCV as Image
# from PIL import Image
#
def _composite_demo(mask_path):
    """Composite the cat over the landscape through *mask_path* and show it."""
    foreground = Image.open('Images/cat.jpg')
    background = Image.open('Images/landscape.jpg').resize(foreground.size)
    mask = Image.open(mask_path).resize(foreground.size)
    result = Image.composite(foreground, background, mask)
    result.show()
    return result

# Same composite with three different masks.
im_new1 = _composite_demo('Images/mask1.jpg')
im_new2 = _composite_demo('Images/mask2.jpg')
im_new3 = _composite_demo('Images/mask3.jpg')
|
from flask_restplus import Resource, Namespace
from app.extensions import api as app_api
from app.api.utils.access_decorators import requires_role_mine_view, requires_role_mine_create
class DummyResource(Resource):
    """Resource with one view-protected and one create-protected method,
    used to exercise the role-based access decorators."""
    @requires_role_mine_view
    def get(self):
        return "Example view method"
    @requires_role_mine_create
    def post(self):
        return "Example create method"
# Mount the dummy resource at the root of the 'test' namespace so the
# tests below can hit it at /test.
api = Namespace('test')
api.add_resource(DummyResource, '')
app_api.add_namespace(api)
# Test view role
def test_get_no_auth(test_client):
    """GET without credentials is rejected with 401."""
    response = test_client.get('/test', headers={})
    assert response.status_code == 401
def test_get_view_only(test_client, auth_headers):
    """GET succeeds with the view-only role."""
    response = test_client.get('/test', headers=auth_headers['view_only_auth_header'])
    assert response.status_code == 200
def test_get_full_auth(test_client, auth_headers):
    """GET succeeds with full credentials."""
    response = test_client.get('/test', headers=auth_headers['full_auth_header'])
    assert response.status_code == 200
# Test create role
def test_post_no_auth(test_client):
    """POST without credentials is rejected with 401."""
    response = test_client.post('/test', headers={})
    assert response.status_code == 401
def test_post_view_only(test_client, auth_headers):
    """POST is rejected for the view-only role (create role required)."""
    response = test_client.post('/test', headers=auth_headers['view_only_auth_header'])
    assert response.status_code == 401
def test_post_full_auth(test_client, auth_headers):
    """POST succeeds with full credentials."""
    response = test_client.post('/test', headers=auth_headers['full_auth_header'])
    assert response.status_code == 200
|
#
# @lc app=leetcode id=1178 lang=python3
#
# [1178] Number of Valid Words for Each Puzzle
#
# @lc code=start
class Solution:
    def findNumOfValidWords(self, words: List[str], puzzles: List[str]) -> List[int]:
        """For each puzzle, count the words where every letter of the word
        appears in the puzzle AND the puzzle's first letter appears in the word.

        Words are compressed to 26-bit letter masks; for each puzzle we
        enumerate every subset of its remaining letters with the first
        letter forced in, and sum mask frequencies.
        Complexity: O(total word length + len(puzzles) * 2**(len(puzzle)-1)).
        """
        # Frequency of each distinct word letter-mask.
        freq = {}
        for word in words:
            mask = 0
            for ch in word:
                mask |= 1 << (ord(ch) - 97)
            # dict.get replaces the original check-then-insert pair.
            freq[mask] = freq.get(mask, 0) + 1
        ans = []
        for puzzle in puzzles:
            first_bit = 1 << (ord(puzzle[0]) - 97)
            rest = len(puzzle) - 1
            total = 0
            # Enumerate every subset of the non-first puzzle letters.
            for subset in range(1 << rest):
                mask = first_bit
                for j in range(rest):
                    if subset & (1 << j):
                        mask |= 1 << (ord(puzzle[j + 1]) - 97)
                total += freq.get(mask, 0)
            ans.append(total)
        return ans
# @lc code=end
|
# -*- coding: utf-8 -*-
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task to analyse Hadoop AppRoot files."""
from __future__ import unicode_literals
import codecs
import logging
import os
import subprocess
from turbinia import TurbiniaException
from turbinia.lib import text_formatter as fmt
from turbinia.evidence import ReportText
from turbinia.lib.utils import extract_artifacts
from turbinia.workers import TurbiniaTask
from turbinia.workers import Priority
log = logging.getLogger('turbinia')
class HadoopAnalysisTask(TurbiniaTask):
"""Task to analyse Hadoop AppRoot files."""
def _AnalyzeHadoopAppRoot(self, collected_artifacts, output_dir):
"""Runs a naive AppRoot files parsing method.
This extracts strings from the saved task file, and searches for usual
post-compromise suspicious patterns.
TODO: properly parse the Proto. Some documentation can be found over there:
https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23.7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
Args:
collected_artifacts(list(str)): a list of paths to extracted files
output_dir(str): The base directory the artfacts are in.
Returns:
Tuple(
list(str): The report data as a list of lines
report_priority(int): The priority of the report (0 - 100)
summary(str): A summary of the report (used for task status)
)
"""
report = []
evil_commands = []
strings_count = 0
priority = Priority.MEDIUM
summary = ''
for filepath in collected_artifacts:
relpath = os.path.relpath(filepath, output_dir)
command = 'strings -a "{0:s}"'.format(filepath)
log.debug('Running command [{0:s}]'.format(command))
proc = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
strings_output, _ = proc.communicate()
strings_output = codecs.decode(strings_output, 'utf-8')
for line in strings_output.splitlines():
strings_count += 1
if (line.find('curl') >= 0) or (line.find('wget') >= 0):
evil_commands.append((relpath, line))
if evil_commands:
msg = 'Found suspicious commands!'
report.append(fmt.heading4(fmt.bold(msg)))
summary = msg
priority = Priority.CRITICAL
else:
msg = 'Did not find any suspicious commands.'
report.append(fmt.heading4(msg))
summary = msg
for filepath, command in evil_commands:
report.append(fmt.bullet(fmt.bold('Command:')))
report.append(fmt.code(command))
report.append('Found in file:')
report.append(fmt.code(filepath))
msg = 'Extracted {0:d} strings from {1:d} file(s)'.format(
strings_count, len(collected_artifacts))
report.append(fmt.bullet(msg))
return (report, priority, summary)
def run(self, evidence, result):
    """Run Hadoop specific analysis on the evidences.

    Args:
        evidence (Evidence object): The evidence we will process.
        result (TurbiniaTaskResult): The object to place task results into.

    Returns:
        TurbiniaTaskResult object.
    """
    # What type of evidence we should output.
    output_evidence = ReportText()
    # Where to store the resulting output file.
    output_file_name = 'hadoop_analysis.txt'
    output_file_path = os.path.join(self.output_dir, output_file_name)
    output_evidence.local_path = output_file_path
    try:
        # We don't use FileArtifactExtractionTask as it exports one evidence
        # per file extracted; here we want a single aggregated report.
        output_dir = os.path.join(self.output_dir, 'artifacts')
        collected_artifacts = extract_artifacts(
            artifact_names=['HadoopAppRoot'], disk_path=evidence.local_path,
            output_dir=output_dir)
        (report, priority, summary) = self._AnalyzeHadoopAppRoot(
            collected_artifacts, output_dir)
        if not report:
            raise TurbiniaException(
                'Report generated by _AnalyzeHadoopAppRoot() is empty')
        output_evidence.text_data = '\n'.join(report)
        result.report_data = output_evidence.text_data
        # Write the report to the output file.
        with open(output_file_path, 'wb') as fh:
            fh.write(output_evidence.text_data.encode('utf8'))
            fh.write('\n'.encode('utf8'))
        result.add_evidence(output_evidence, evidence.config)
        result.report_priority = priority
        result.close(self, success=True, status=summary)
    except TurbiniaException as e:
        # Close the result as failed and return early with the error status.
        result.close(self, success=False, status=str(e))
        return result
    return result
|
# Copyright (c) 2020 The FedVision Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pickle
from typing import Optional
import grpc
from paddle import fluid
from fedvision.framework.utils.logger import Logger
from fedvision.paddle_fl.protobuf import scheduler_pb2_grpc, scheduler_pb2
from paddle_fl.paddle_fl.core.master.fl_job import FLJobBase
class TrainerSchedulerAgent(Logger):
    """gRPC client used by a trainer worker to talk to the FL scheduler.

    Wraps the scheduler stub with the worker lifecycle calls:
    ``start_channel`` -> ``init_worker`` -> ``join`` (per step) -> ``finish``.
    """

    def __init__(self, worker_name, scheduler_ep):
        """
        Args:
            worker_name: unique name identifying this worker to the scheduler.
            scheduler_ep: scheduler endpoint, e.g. ``"host:port"``.
        """
        self._worker_name = worker_name
        self._scheduler_ep = scheduler_ep
        self._channel: Optional[grpc.Channel] = None
        self._stub: Optional[scheduler_pb2_grpc.SchedulerStub] = None

    def start_channel(self):
        """Open the gRPC channel and block until it is ready.

        Returns:
            self, to allow call chaining.
        """
        self._channel = grpc.insecure_channel(self._scheduler_ep)
        self._stub = scheduler_pb2_grpc.SchedulerStub(self._channel)
        # Block until the channel is connected (raises on channel failure).
        self.debug("waiting channel ready")
        future = grpc.channel_ready_future(self._channel)
        future.result()
        self.debug("channel ready")
        return self

    def init_worker(self):
        """Register this worker with the scheduler."""
        self.debug("start to init")
        self._stub.Init(scheduler_pb2.Init.REQ(name=self._worker_name))
        self.debug("init success")

    def join(self, step: int):
        """Ask the scheduler for permission to join the given step.

        Args:
            step: current trainer step number.

        Returns:
            True if the scheduler accepted the join request.
        """
        self.debug("start to join")
        response = self._stub.WorkerJoin(
            scheduler_pb2.WorkerJoin.REQ(name=self._worker_name, step=step)
        )
        self.debug(f"join success: {response.status}")
        return response.status == scheduler_pb2.WorkerJoin.ACCEPT

    def finish(self):
        """Tell the scheduler this worker is done.

        Returns:
            True if the scheduler acknowledged the finish request.
        """
        self.debug("start to finish")
        status = self._stub.WorkerFinish(
            scheduler_pb2.WorkerFinish.REQ(name=self._worker_name)
        )
        self.debug(f"finish success: {status}")
        # NOTE(review): this compares the whole response message with an enum
        # value (unlike join(), which compares response.status) and so looks
        # like it is always False -- confirm against scheduler_pb2.
        return status == scheduler_pb2.WorkerFinish.DONE

    def close(self):
        """Close the underlying gRPC channel if it was opened."""
        # Guard: close() on a never-started agent used to raise AttributeError.
        if self._channel is not None:
            self._channel.close()
class FedAvgTrainer(FLJobBase):
    """Trainer-side driver for federated averaging with PaddleFL.

    Expected call order: ``load_job()`` -> ``start()`` -> ``run()`` /
    ``run_with_epoch()``, since ``start()`` executes the startup program
    that ``load_job()`` deserializes.
    """

    def __init__(self, scheduler_ep, trainer_ep):
        """
        Args:
            scheduler_ep: endpoint of the FL scheduler.
            trainer_ep: endpoint of this trainer, also used as its worker name.
        """
        self._logger = logging.getLogger("FLTrainer")
        super(FedAvgTrainer, self).__init__()
        self._scheduler_ep = scheduler_ep
        self._trainer_ep = trainer_ep
        self.scheduler_agent: Optional[TrainerSchedulerAgent] = None
        self.exe: Optional[fluid.Executor] = None
        # Number of run() calls so far; controls when send/recv programs fire.
        self.cur_step = 0

    def start(self, place):
        """Connect to the scheduler and run the startup program.

        Must be called after load_job(), which sets self._startup_program.

        Args:
            place: Paddle device placement, e.g. ``fluid.CPUPlace()``.
        """
        self.scheduler_agent = TrainerSchedulerAgent(
            scheduler_ep=self._scheduler_ep, worker_name=self._trainer_ep
        )
        self.scheduler_agent.start_channel()
        self.scheduler_agent.init_worker()
        self.exe = fluid.Executor(place)
        self.exe.run(self._startup_program)

    def load_job(
        self,
        startup_program: str,
        main_program: str,
        send_program: str,
        recv_program: str,
        feed_names: str,
        target_names: str,
        strategy: str,
    ):
        """Load serialized programs and metadata produced by the FL compiler.

        All arguments are file paths to the serialized artifacts.
        """
        self._startup_program = self._load_program(startup_program)
        self._main_program = self._load_program(main_program)
        self._send_program = self._load_program(send_program)
        self._recv_program = self._load_program(recv_program)
        # Number of local steps between parameter synchronizations.
        self._step = self._load_strategy(strategy)._inner_step
        self._feed_names = self._load_str_list(feed_names)
        self._target_names = self._load_str_list(target_names)

    def load_feed_list(self, feeds_path):
        """Deserialize ``fluid.data`` feed variables from a pickle file.

        The file starts with the count, followed by that many kwargs dicts.

        Returns:
            list of the reconstructed feed variables.
        """
        feeds = []
        with open(feeds_path, "rb") as f:
            count = pickle.load(f)
            for _ in range(count):
                feeds.append(fluid.data(**pickle.load(f)))
        return feeds

    @staticmethod
    def _load_strategy(input_file):
        """Unpickle the FL strategy object from *input_file*."""
        # Use a context manager: the original leaked the open file handle.
        with open(input_file, "rb") as f:
            return pickle.load(f)

    def reset(self):
        """Reset the local step counter (e.g. between training rounds)."""
        self.cur_step = 0

    def run_with_epoch(self, reader, feeder, fetch, num_epoch):
        """Run *num_epoch* passes over *reader*, syncing once at each boundary.

        Runs the recv program once before training and the send program once
        after all epochs complete.
        """
        self._logger.debug("begin to run recv program")
        self.exe.run(self._recv_program)
        self._logger.debug("recv done")
        # The original also kept a write-only `epoch` counter; dropped.
        for _ in range(num_epoch):
            for data in reader():
                acc = self.exe.run(
                    self._main_program, feed=feeder.feed(data), fetch_list=fetch
                )
                # NOTE(review): consider self._logger for this, to match the
                # logging style used elsewhere in the class.
                print(f"acc: {acc}")
                self.cur_step += 1
        self._logger.debug("begin to run send program")
        self.exe.run(self._send_program)

    def run(self, feed, fetch):
        """Run one training step; sync parameters every ``_step`` steps.

        Args:
            feed: feed dict for the main program.
            fetch: list of variables to fetch.

        Returns:
            The fetched values from the main program run.
        """
        self._logger.debug(
            f"begin to run FedAvgTrainer, cur_step={self.cur_step}, inner_step={self._step}"
        )
        # Pull aggregated parameters at the start of each sync window.
        if self.cur_step % self._step == 0:
            self._logger.debug("run recv program start")
            self.exe.run(self._recv_program)
            self._logger.debug("run recv program done")
        self._logger.debug("run main program start")
        loss = self.exe.run(self._main_program, feed=feed, fetch_list=fetch)
        self._logger.debug("run main program done")
        # Push local updates at the same window boundary.
        if self.cur_step % self._step == 0:
            self._logger.debug("run send program start")
            self.exe.run(self._send_program)
            self._logger.debug("run send program done")
        self.cur_step += 1
        return loss

    def save_model(self, model_path):
        """Export the current main program as a Paddle inference model.

        Args:
            model_path: directory to write the inference model into.
        """
        fluid.io.save_inference_model(
            dirname=model_path,
            feeded_var_names=self._feed_names,
            target_vars=[
                self._main_program.global_block().var(fetch_var_name)
                for fetch_var_name in self._target_names
            ],
            executor=self.exe,
            main_program=self._main_program,
        )
|
# encoding: utf-8
'''
@author: Minghao Guo
@contact: mh.guo0111@gmail.com
@software: nef
@file: sart.py
@date: 8/28/2019
@desc:
'''
from nefct import nef_class
import numpy as np
from nefct.data.image import Image
from nefct.data.projection import ProjectionSequence
from nefct.functions.project import Project
from nefct.functions.back_project import BackProject
import tensorflow as tf
from nefct.utils import tqdm
@nef_class
class SART:
    """Simultaneous Algebraic Reconstruction Technique (SART) solver.

    Iteratively refines an image estimate by back-projecting the residual
    between the measured projections and the forward projection of the
    current estimate.
    """

    # number of SART iterations to run
    n_iter: int
    # relaxation factor applied to each correction
    lambda_: float
    # normalization map; corrections are divided element-wise by its data
    emap: Image
    # forward-projection operator
    project: Project
    # back-projection operator
    back_project: BackProject

    def __call__(self, projection: ProjectionSequence, x: Image = None) -> Image:
        """Run n_iter SART updates and return the reconstructed image.

        Args:
            projection: measured projection data.
            x: optional initial image; defaults to a zero image shaped
                like emap.
        """
        if x is None:
            x = self.emap * 0
        for _ in tqdm(range(self.n_iter)):
            _projection_tf = self.project(x)
            # Back-project the residual (measured - estimated projections).
            _bproj_tf = self.back_project(projection - _projection_tf)
            # Normalize by emap; div_no_nan maps division by zero to 0.
            # NOTE(review): tf.div_no_nan is a deprecated alias
            # (tf.math.divide_no_nan in TF2) -- confirm the TF version in use.
            _bproj_tf2 = _bproj_tf.update(data = tf.div_no_nan(_bproj_tf.data,
                                                               self.emap.data))
            # Apply the relaxed correction and materialize back to numpy.
            x = x.update(data = (x + _bproj_tf2 * self.lambda_).data.numpy())
        return x
|
#!/usr/bin/env python
#
#___INFO__MARK_BEGIN__
##########################################################################
# Copyright 2016,2017 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
#___INFO__MARK_END__
#
from utils import needs_uge
from utils import create_config_file
from utils import generate_random_string
from uge.api.qconf_api import QconfApi
from uge.config.config_manager import ConfigManager
from uge.log.log_manager import LogManager
from uge.exceptions.object_not_found import ObjectNotFound
from uge.exceptions.object_already_exists import ObjectAlreadyExists
# Module-level test fixtures: write the test configuration before the
# QconfApi client is instantiated.
create_config_file()
API = QconfApi()
# Random operator name so concurrent test runs do not collide.
OPERATOR_NAME = '%s' % generate_random_string(6)
CONFIG_MANAGER = ConfigManager.get_instance()
LOG_MANAGER = LogManager.get_instance()
@needs_uge
def test_list_operators():
    """The operator list query must always return a non-None result."""
    operators = API.list_operators()
    assert(operators is not None)
def test_add_operator():
    """Adding an operator grows the list by one and includes the new name."""
    before = API.list_operators()
    after = API.add_operators([OPERATOR_NAME])
    assert(len(after) == len(before) + 1)
    assert(after.count(OPERATOR_NAME) == 1)
def test_delete_operator():
    """Deleting an operator shrinks the list by one and removes the name."""
    before = API.list_operators()
    after = API.delete_operators([OPERATOR_NAME])
    assert(len(after) == len(before) - 1)
    assert(after.count(OPERATOR_NAME) == 0)
def test_object_already_exists():
    """Adding a duplicate operator must raise ObjectAlreadyExists."""
    API.add_operators([OPERATOR_NAME])
    try:
        API.add_operators([OPERATOR_NAME])
        assert(False)
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax;
    # the bound exception was unused, so it is dropped entirely.
    except ObjectAlreadyExists:
        # ok: the duplicate add was rejected as expected
        pass
    API.delete_operators([OPERATOR_NAME])
def test_object_not_found():
    """Deleting a non-existent operator must raise ObjectNotFound."""
    try:
        API.delete_operators([OPERATOR_NAME])
        # Previously the test passed silently when no exception was raised;
        # the delete of a missing operator must not succeed.
        assert(False)
    # 'except X:' replaces the Python-2-only 'except X, e' syntax.
    except ObjectNotFound:
        # ok: the missing operator was reported as expected
        pass
|
#!/usr/bin/env python
##############################################################################
#
# diffpy.structure by DANSE Diffraction group
# Simon J. L. Billinge
# (c) 2008 trustees of the Michigan State University.
# All rights reserved.
#
# File coded by: Pavol Juhas
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE_DANSE.txt for license information.
#
##############################################################################
"""class Lattice stores properties and provides simple operations in lattice
coordinate system.
Module variables:
cartesian -- constant instance of Lattice, default Cartesian system
"""
import math
import numpy
import numpy.linalg as numalg
from diffpy.structure import LatticeError
# Helper Functions -----------------------------------------------------------
# Exact cosine values at special angles (in degrees), used to avoid
# floating-point round-off such as cos(90 degrees) evaluating to ~6.1e-17.
_EXACT_COSD = {
    0.0: +1.0, 60.0: +0.5, 90.0: 0.0, 120.0: -0.5,
    180.0: -1.0, 240.0: -0.5, 270.0: 0.0, 300.0: +0.5,
}

def cosd(x):
    """Return the cosine of x (measured in degrees).

    Avoid round-off errors for exact cosine values.
    """
    try:
        return _EXACT_COSD[x % 360.0]
    except KeyError:
        return math.cos(math.radians(x))

def sind(x):
    """Return the sine of x (measured in degrees).

    Avoid round-off errors for exact sine values.
    """
    return cosd(90.0 - x)
# ----------------------------------------------------------------------------
class Lattice(object):
    """
    General coordinate system and associated operations.

    Parameters
    ----------
    a : float or Lattice, optional
        The cell length *a*. When present, other cell parameters
        must be also specified. When of the *Lattice* type, create
        a duplicate Lattice.
    b : float
        The cell length *b*.
    c : float
        The cell length *c*.
    alpha : float
        The angle between the *b* and *c* axes in degrees.
    beta : float
        The angle between the *a* and *c* axes in degrees.
    gamma : float
        The angle between the *a* and *b* axes in degrees.
    baserot : array_like, optional
        The 3x3 rotation matrix of the base vectors with respect
        to their standard setting.
    base : array_like, optional
        The 3x3 array of row base vectors. This must be the
        only argument when present.

    Attributes
    ----------
    metrics : ndarray
        The metrics tensor.
    base : ndarray
        The 3x3 matrix of row base vectors in Cartesian coordinates,
        which may be rotated, i.e., ``base = stdbase @ baserot``.
    stdbase : ndarray
        The 3x3 matrix of row base vectors in standard orientation.
    baserot : ndarray
        The rotation matrix for the `base`.
    recbase : ndarray
        The inverse of the `base` matrix, where the columns give
        reciprocal vectors in Cartesian coordinates.
    normbase : ndarray
        The `base` vectors scaled by magnitudes of reciprocal cell lengths.
    recnormbase : ndarray
        The inverse of the `normbase` matrix.
    isotropicunit : ndarray
        The 3x3 tensor for a unit isotropic displacement parameters in this
        coordinate system. This is an identity matrix when this Lattice
        is orthonormal.

    Note
    ----
    The array attributes are read-only. They get updated by changing
    some lattice parameters or by calling the `setLatPar()` or
    `setLatBase()` methods.

    Examples
    --------
    Create a Cartesian coordinate system::

        >>> Lattice()

    Create coordinate system with the cell lengths ``a``, ``b``, ``c``
    and cell angles ``alpha``, ``beta``, ``gamma`` in degrees::

        >>> Lattice(a, b, c, alpha, beta, gamma)

    Create a duplicate of an existing Lattice ``lat``::

        >>> Lattice(lat)

    Create coordinate system with the base vectors given by rows
    of the ``abc`` matrix::

        >>> Lattice(base=abc)
    """

    # round-off tolerance
    _epsilon = 1.0e-8

    # properties -------------------------------------------------------------

    a = property(lambda self: self._a,
                 lambda self, value: self.setLatPar(a=value),
                 doc='The unit cell length *a*.')

    b = property(lambda self: self._b,
                 lambda self, value: self.setLatPar(b=value),
                 doc='The unit cell length *b*.')

    c = property(lambda self: self._c,
                 lambda self, value: self.setLatPar(c=value),
                 doc='The unit cell length *c*.')

    alpha = property(lambda self: self._alpha,
                     lambda self, value: self.setLatPar(alpha=value),
                     doc='The cell angle *alpha* in degrees.')

    beta = property(lambda self: self._beta,
                    lambda self, value: self.setLatPar(beta=value),
                    doc='The cell angle *beta* in degrees.')

    gamma = property(lambda self: self._gamma,
                     lambda self, value: self.setLatPar(gamma=value),
                     doc='The cell angle *gamma* in degrees.')

    # read-only derived properties

    @property
    def unitvolume(self):
        '''The unit cell volume when a = b = c = 1.
        '''
        # Recalculate lattice cosines to ensure this is right
        # even if ca, cb, cg data were not yet updated.
        ca = cosd(self.alpha)
        cb = cosd(self.beta)
        cg = cosd(self.gamma)
        rv = math.sqrt( 1.0 + 2.0*ca*cb*cg - ca*ca - cb*cb - cg*cg)
        return rv

    volume = property(lambda self: self.a * self.b * self.c * self.unitvolume,
                      doc='The unit cell volume.')

    ar = property(lambda self: self._ar,
                  doc='The cell length *a* of the reciprocal lattice.')

    br = property(lambda self: self._br,
                  doc='The cell length *b* of the reciprocal lattice.')

    cr = property(lambda self: self._cr,
                  doc='The cell length *c* of the reciprocal lattice.')

    alphar = property(lambda self: self._alphar,
                      doc='The reciprocal cell angle *alpha* in degrees.')

    betar = property(lambda self: self._betar,
                     doc='The reciprocal cell angle *beta* in degrees.')

    gammar = property(lambda self: self._gammar,
                      doc='The reciprocal cell angle *gamma* in degrees.')

    ca = property(lambda self: self._ca,
                  doc='The cosine of the cell angle *alpha*.')

    cb = property(lambda self: self._cb,
                  doc='The cosine of the cell angle *beta*.')

    cg = property(lambda self: self._cg,
                  doc='The cosine of the cell angle *gamma*.')

    sa = property(lambda self: self._sa,
                  doc='The sine of the cell angle *alpha*.')

    sb = property(lambda self: self._sb,
                  doc='The sine of the cell angle *beta*.')

    sg = property(lambda self: self._sg,
                  doc='The sine of the cell angle *gamma*.')

    car = property(lambda self: self._car,
                   doc='The cosine of the reciprocal angle *alpha*.')

    cbr = property(lambda self: self._cbr,
                   doc='The cosine of the reciprocal angle *beta*.')

    cgr = property(lambda self: self._cgr,
                   doc='The cosine of the reciprocal angle *gamma*.')

    sar = property(lambda self: self._sar,
                   doc='The sine of the reciprocal angle *alpha*.')

    sbr = property(lambda self: self._sbr,
                   doc='The sine of the reciprocal angle *beta*.')

    sgr = property(lambda self: self._sgr,
                   doc='The sine of the reciprocal angle *gamma*.')

    # done with properties ---------------------------------------------------

    def __init__(self, a=None, b=None, c=None,
                 alpha=None, beta=None, gamma=None,
                 baserot=None, base=None):
        # build a set of provided argument names for later use.
        apairs = (('a', a), ('b', b), ('c', c),
                  ('alpha', alpha), ('beta', beta), ('gamma', gamma),
                  ('baserot', baserot), ('base', base))
        argset = set(n for n, v in apairs if v is not None)
        # initialize data members, their values will be set by setLatPar()
        self._a = self._b = self._c = None
        self._alpha = self._beta = self._gamma = None
        self._ca = self._cb = self._cg = None
        self._sa = self._sb = self._sg = None
        self._ar = self._br = self._cr = None
        self._alphar = self._betar = self._gammar = None
        self._car = self._cbr = self._cgr = None
        self._sar = self._sbr = self._sgr = None
        self.baserot = numpy.identity(3)
        self.base = self.recbase = None
        self.normbase = self.recnormbase = None
        # work out argument variants
        # Lattice()
        if not argset:
            self.setLatPar(1.0, 1.0, 1.0, 90.0, 90.0, 90.0, baserot)
        # Lattice(base=abc)
        elif base is not None:
            if len(argset) > 1:
                raise ValueError("'base' must be the only argument.")
            self.setLatBase(base)
        # Lattice(lat)
        elif isinstance(a, Lattice):
            if len(argset) > 1:
                raise ValueError("Lattice object must be the only argument.")
            self.__dict__.update(a.__dict__)
        # otherwise do default Lattice(a, b, c, alpha, beta, gamma)
        else:
            abcabg = ('a', 'b', 'c', 'alpha', 'beta', 'gamma')
            if not argset.issuperset(abcabg):
                raise ValueError("Provide all 6 cell parameters.")
            self.setLatPar(a, b, c, alpha, beta, gamma, baserot=baserot)
        return

    def setLatPar(self, a=None, b=None, c=None,
                  alpha=None, beta=None, gamma=None, baserot=None):
        """Set one or more lattice parameters.

        This updates all attributes that depend on the lattice parameters.

        Parameters
        ----------
        a : float, optional
            The new value of the cell length *a*.
        b : float, optional
            The new value of the cell length *b*.
        c : float, optional
            The new value of the cell length *c*.
        alpha : float, optional
            The new value of the cell angle *alpha* in degrees.
        beta : float, optional
            The new value of the cell angle *beta* in degrees.
        gamma : float, optional
            The new value of the cell angle *gamma* in degrees.
        baserot : array_like, optional
            The new 3x3 rotation matrix of the base vectors with respect
            to their standard setting in Cartesian coordinates.

        Note
        ----
        Parameters that are not specified will keep their initial
        values.
        """
        if a is not None: self._a = float(a)
        if b is not None: self._b = float(b)
        if c is not None: self._c = float(c)
        if alpha is not None: self._alpha = float(alpha)
        if beta is not None: self._beta = float(beta)
        if gamma is not None: self._gamma = float(gamma)
        if baserot is not None: self.baserot = numpy.array(baserot)
        self._ca = ca = cosd(self.alpha)
        self._cb = cb = cosd(self.beta)
        self._cg = cg = cosd(self.gamma)
        self._sa = sa = sind(self.alpha)
        self._sb = sb = sind(self.beta)
        self._sg = sg = sind(self.gamma)
        # cache the unit volume value
        Vunit = self.unitvolume
        # reciprocal lattice
        self._ar = ar = sa/(self.a*Vunit)
        self._br = br = sb/(self.b*Vunit)
        self._cr = cr = sg/(self.c*Vunit)
        self._car = car = (cb*cg - ca)/(sb*sg)
        self._cbr = cbr = (ca*cg - cb)/(sa*sg)
        self._cgr = cgr = (ca*cb - cg)/(sa*sb)
        self._sar = math.sqrt(1.0 - car*car)
        self._sbr = math.sqrt(1.0 - cbr*cbr)
        self._sgr = sgr = math.sqrt(1.0 - cgr*cgr)
        self._alphar = math.degrees(math.acos(car))
        self._betar = math.degrees(math.acos(cbr))
        self._gammar = math.degrees(math.acos(cgr))
        # metrics tensor
        self.metrics = numpy.array( [
            [ self.a*self.a,     self.a*self.b*cg,  self.a*self.c*cb ],
            [ self.b*self.a*cg,  self.b*self.b,     self.b*self.c*ca ],
            [ self.c*self.a*cb,  self.c*self.b*ca,  self.c*self.c ] ],
            dtype=float )
        # standard Cartesian coordinates of lattice vectors
        self.stdbase = numpy.array( [
            [ 1.0/ar,   -cgr/sgr/ar,    cb*self.a ],
            [ 0.0,      self.b*sa,      self.b*ca ],
            [ 0.0,      0.0,            self.c ] ],
            dtype=float )
        # Cartesian coordinates of lattice vectors
        self.base = numpy.dot(self.stdbase, self.baserot)
        self.recbase = numalg.inv(self.base)
        # bases normalized to unit reciprocal vectors
        self.normbase = self.base * [[ar], [br], [cr]]
        self.recnormbase = self.recbase / [ar, br, cr]
        self.isotropicunit = _isotropicunit(self.recnormbase)
        return

    def setLatBase(self, base):
        """Set new base vectors for this lattice.

        This updates the cell lengths and cell angles according to the
        new base. The `stdbase`, `baserot`, and `metrics` attributes
        are also updated.

        Parameters
        ----------
        base : array_like
            The 3x3 matrix of row base vectors expressed
            in Cartesian coordinates.

        Raises
        ------
        LatticeError
            When the base vectors are degenerate or the base is
            not right-handed.
        """
        self.base = numpy.array(base)
        detbase = numalg.det(self.base)
        if abs(detbase) < 1.0e-8:
            emsg = "base vectors are degenerate"
            raise LatticeError(emsg)
        elif detbase < 0.0:
            emsg = "base is not right-handed"
            raise LatticeError(emsg)
        self._a = a = math.sqrt(numpy.dot(self.base[0,:], self.base[0,:]))
        self._b = b = math.sqrt(numpy.dot(self.base[1,:], self.base[1,:]))
        self._c = c = math.sqrt(numpy.dot(self.base[2,:], self.base[2,:]))
        self._ca = ca = numpy.dot(self.base[1,:], self.base[2,:]) / (b*c)
        self._cb = cb = numpy.dot(self.base[0,:], self.base[2,:]) / (a*c)
        self._cg = cg = numpy.dot(self.base[0,:], self.base[1,:]) / (a*b)
        self._sa = sa = math.sqrt(1.0 - ca**2)
        self._sb = sb = math.sqrt(1.0 - cb**2)
        self._sg = sg = math.sqrt(1.0 - cg**2)
        self._alpha = math.degrees(math.acos(ca))
        self._beta = math.degrees(math.acos(cb))
        self._gamma = math.degrees(math.acos(cg))
        # cache the unit volume value
        Vunit = self.unitvolume
        # reciprocal lattice
        self._ar = ar = sa/(self.a*Vunit)
        self._br = br = sb/(self.b*Vunit)
        self._cr = cr = sg/(self.c*Vunit)
        self._car = car = (cb*cg - ca)/(sb*sg)
        self._cbr = cbr = (ca*cg - cb)/(sa*sg)
        self._cgr = cgr = (ca*cb - cg)/(sa*sb)
        self._sar = math.sqrt(1.0 - car**2)
        self._sbr = math.sqrt(1.0 - cbr**2)
        self._sgr = sgr = math.sqrt(1.0 - cgr**2)
        self._alphar = math.degrees(math.acos(car))
        self._betar = math.degrees(math.acos(cbr))
        self._gammar = math.degrees(math.acos(cgr))
        # standard orientation of lattice vectors
        self.stdbase = numpy.array([
            [ 1.0/ar,   -cgr/sgr/ar,    cb*a ],
            [ 0.0,      b*sa,           b*ca ],
            [ 0.0,      0.0,            c ]],
            dtype=float)
        # calculate unit cell rotation matrix, base = stdbase @ baserot
        self.baserot = numpy.dot(numalg.inv(self.stdbase), self.base)
        self.recbase = numalg.inv(self.base)
        # bases normalized to unit reciprocal vectors
        self.normbase = self.base * [[ar], [br], [cr]]
        self.recnormbase = self.recbase / [ar, br, cr]
        self.isotropicunit = _isotropicunit(self.recnormbase)
        # update metrics tensor
        self.metrics = numpy.array([
            [ a*a,     a*b*cg,  a*c*cb ],
            [ b*a*cg,  b*b,     b*c*ca ],
            [ c*a*cb,  c*b*ca,  c*c ]],
            dtype=float)
        return

    def abcABG(self):
        """
        Returns
        -------
        A tuple of ``(a, b, c, alpha, beta, gamma)``.
        """
        rv = (self.a, self.b, self.c, self.alpha, self.beta, self.gamma)
        return rv

    def reciprocal(self):
        """
        Returns
        -------
        Lattice
            The reciprocal lattice of the current lattice.
        """
        rv = Lattice(base=numpy.transpose(self.recbase))
        return rv

    def cartesian(self, u):
        """Transform lattice vector to Cartesian coordinates.

        Parameters
        ----------
        u : array_like
            Vector of lattice coordinates or an Nx3 array
            of lattice vectors.

        Returns
        -------
        rc : ndarray
            Cartesian coordinates of the *u* vector.
        """
        rc = numpy.dot(u, self.base)
        return rc

    def fractional(self, rc):
        """Transform Cartesian vector to fractional lattice coordinates.

        Parameters
        ----------
        rc : array_like
            A vector of Cartesian coordinates or an Nx3 array of
            Cartesian vectors.

        Returns
        -------
        u : ndarray
            Fractional coordinates of the Cartesian vector *rc*.
        """
        u = numpy.dot(rc, self.recbase)
        return u

    def dot(self, u, v):
        """Calculate dot product of 2 lattice vectors.

        Parameters
        ----------
        u : array_like
            The first lattice vector or an Nx3 array.
        v : array_like
            The second lattice vector or an array of
            the same shape as *u*.

        Returns
        -------
        float or ndarray
            The dot product of lattice vectors *u*, *v*.
        """
        dp = (u * numpy.dot(v, self.metrics)).sum(axis=-1)
        return dp

    def norm(self, xyz):
        """Calculate norm of a lattice vector.

        Parameters
        ----------
        xyz : array_like
            A vector or an Nx3 array of fractional coordinates.

        Returns
        -------
        float or ndarray
            The magnitude of the lattice vector *xyz*.
        """
        # this is a few percent faster than sqrt(dot(u, u)).
        return numpy.sqrt((self.cartesian(xyz)**2).sum(axis=-1))

    def rnorm(self, hkl):
        """Calculate norm of a reciprocal vector.

        Parameters
        ----------
        hkl : array_like
            A vector or an Nx3 array of reciprocal coordinates.

        Returns
        -------
        float or ndarray
            The magnitude of the reciprocal vector *hkl*.
        """
        hklcartn = numpy.dot(hkl, self.recbase.T)
        return numpy.sqrt((hklcartn**2).sum(axis=-1))

    def dist(self, u, v):
        """Calculate distance between 2 points in lattice coordinates.

        Parameters
        ----------
        u : array_like
            A vector or an Nx3 matrix of fractional coordinates.
        v : ndarray
            A vector or an Nx3 matrix of fractional coordinates.

        Note
        ----
        *u* and *v* must be of the same shape when matrices.

        Returns
        -------
        float or ndarray
            The distance between lattice points *u* and *v*.
        """
        duv = numpy.asarray(u) - v
        return self.norm(duv)

    def angle(self, u, v):
        """Calculate angle between 2 lattice vectors in degrees.

        Parameters
        ----------
        u : array_like
            The first lattice vector.
        v : array_like
            The second lattice vector.

        Returns
        -------
        float
            The angle between lattice vectors *u* and *v* in degrees.
        """
        ca = self.dot(u, v)/( self.norm(u)*self.norm(v) )
        # avoid round-off errors that would make abs(ca) greater than 1
        if numpy.isscalar(ca):
            ca = max(min(ca, 1), -1)
            rv = math.degrees(math.acos(ca))
        else:
            ca[ca < -1] = -1
            ca[ca > +1] = +1
            rv = numpy.degrees(numpy.arccos(ca))
        return rv

    def isanisotropic(self, umx):
        """True if displacement parameter matrix is anisotropic.

        This checks if the specified matrix of anisotropic displacement
        parameters (ADP) differs from isotropic values for this lattice
        by more than a small round-off error.

        Parameters
        ----------
        umx : array_like
            The 3x3 matrix of displacement parameters.

        Returns
        -------
        bool
            True when *umx* is anisotropic by more than a round-off error.
        """
        umx = numpy.asarray(umx)
        utr = numpy.trace(umx) / umx.shape[0]
        udmax = numpy.fabs(umx - utr * self.isotropicunit).max()
        rv = udmax > self._epsilon
        return rv

    def __repr__(self):
        """String representation of this lattice.
        """
        I3 = numpy.identity(3, dtype=float)
        rotbaseI3diff = max(numpy.reshape(numpy.fabs(self.baserot-I3), 9))
        cartlatpar = numpy.array([1.0, 1.0, 1.0 , 90.0, 90.0, 90.0])
        latpardiff = cartlatpar - self.abcABG()
        if rotbaseI3diff > self._epsilon:
            s = "Lattice(base=%r)" % self.base
        elif numpy.fabs(latpardiff).max() < self._epsilon:
            s = "Lattice()"
        else:
            s = "Lattice(a=%g, b=%g, c=%g, alpha=%g, beta=%g, gamma=%g)" % \
                self.abcABG()
        return s
# End of class Lattice
# Local Helpers --------------------------------------------------------------
def _isotropicunit(recnormbase):
"""Calculate tensor of unit isotropic displacement parameters.
Parameters
----------
recnormbase : ndarray
The inverse of normalized base vectors of some lattice.
Returns
-------
ndarray
The 3x3 matrix of displacement parameters corresponding to
a unit isotropic displacements.
"""
isounit = numpy.dot(recnormbase.T, recnormbase)
# ensure there are no round-off deviations on the diagonal
isounit[0, 0] = 1
isounit[1, 1] = 1
isounit[2, 2] = 1
return isounit
# Module Constants -----------------------------------------------------------
cartesian = Lattice()
|
import json
import argparse
from deepfrier.Predictor import Predictor
def get_all_labels(annot_file, ont='mf'):
    """Parse an annotation file into ``{protein_name: [task indices]}``.

    Args:
        annot_file: path to the tab-separated annotation file. For 'ec' the
            layout is: header line, task line, header line, then one
            "name<TAB>comma-separated-tasks" row per protein. For the GO
            ontologies the task lines for mf/bp/cc sit at rows 1/5/9 and
            the per-protein rows start at row 13.
        ont: ontology to read: 'mf', 'bp', 'cc' or 'ec'.

    Returns:
        dict mapping protein name to a list of integer task indices.

    Raises:
        ValueError: if *ont* is not one of the supported ontologies.
    """
    if ont == 'ec':
        with open(annot_file, "r") as f:
            f.readline()
            tasks = f.readline().strip().split("\t")
            task_dict = {v: k for k, v in enumerate(tasks)}
            f.readline()
            labels = {}
            for line in f:
                name, pos_tasks = line.strip().split("\t")
                pos_tasks = [task_dict[x] for x in pos_tasks.split(",")]
                labels[name] = pos_tasks
    else:
        with open(annot_file, "r") as f:
            lines = f.readlines()
        if ont == 'mf':
            idx = 1
        elif ont == 'bp':
            idx = 2
        elif ont == 'cc':
            idx = 3
        else:
            # Previously an unknown ontology crashed later with NameError
            # on the undefined `idx`; fail early and explicitly instead.
            raise ValueError("Unknown ontology: {}".format(ont))
        tasks = lines[(idx - 1) * 4 + 1].strip().split("\t")
        task_dict = {v: k for k, v in enumerate(tasks)}
        lines = lines[13:]
        labels = {}
        for line in lines:
            name = line.strip().split("\t")[0]
            try:
                pos_tasks = line.strip().split("\t")[idx]
                pos_tasks = [task_dict[x] for x in pos_tasks.split(",")]
            # Narrowed from a bare `except:`; only missing column/unknown
            # task names mean "no annotations for this protein".
            except (IndexError, KeyError):
                pos_tasks = []
            labels[name] = pos_tasks
    return labels
def get_seq_dict(fasta_file, split_file='', cutoff=95):
    """Read a FASTA file into ``{id: sequence}``, optionally filtered.

    Args:
        fasta_file: path to the FASTA file; the id is the first
            whitespace-separated token after '>'.
        split_file: optional CSV with a header that contains a
            ``<CUTOFF%`` column of 0/1 flags; only ids flagged 1 are kept.
        cutoff: sequence-identity cutoff selecting the split-file column.

    Returns:
        dict mapping sequence id to its (concatenated) sequence.
    """
    select_set = None
    if split_file != '':
        select_set = set()  # set gives O(1) membership vs the original list scan
        with open(split_file, 'r') as f:
            header = f.readline().strip()
            col = header.split(',').index("<{}%".format(cutoff))
            for line in f:
                line = line.strip()
                if line == '':
                    continue
                parts = line.split(',')
                if int(parts[col]):
                    select_set.add(parts[0])
    seq_dict = {}
    # Context manager: the original opened the FASTA file without closing it.
    with open(fasta_file, 'r') as f:
        for line in f:
            line = line.strip()
            if line == '':
                continue
            if line.startswith('>'):
                _id = line.replace('>', '').split(' ')[0]
                seq_dict[_id] = ''
            else:
                # Continuation line: append to the most recent header's id.
                seq_dict[_id] += line
    if select_set is not None:
        seq_dict = {k: v for k, v in seq_dict.items() if k in select_set}
    return seq_dict
if __name__ == "__main__":
    # CLI for running DeepFRI predictions from sequences, contact maps,
    # PDB files, or whole directories/catalogues thereof.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-s', '--seq', type=str, help="Protein sequence to be annotated.")
    parser.add_argument('-cm', '--cmap', type=str, help="Protein contact map to be annotated (in *npz file format).")
    parser.add_argument('-pdb', '--pdb_fn', type=str, help="Protein PDB file to be annotated.")
    parser.add_argument('--cmap_csv', type=str, help="Catalogue with chain to file path mapping.")
    parser.add_argument('--pdb_dir', type=str, help="Directory with PDB files of predicted Rosetta/DMPFold structures.")
    parser.add_argument('--fasta_fn', type=str, help="Fasta file with protein sequences.")
    parser.add_argument('--model_config', type=str, default='./trained_models/model_config.json', help="JSON file with model names.")
    parser.add_argument('-ont', '--ontology', type=str, default=['mf'], nargs='+', required=True, choices=['mf', 'bp', 'cc', 'ec'],
                        help="Gene Ontology/Enzyme Commission.")
    parser.add_argument('-o', '--output_fn_prefix', type=str, default='DeepFRI', help="Save predictions/saliency in file.")
    parser.add_argument('-v', '--verbose', help="Prints predictions.", action="store_true")
    parser.add_argument('--use_guided_grads', help="Use guided grads to compute gradCAM.", action="store_true")
    parser.add_argument('--saliency', help="Compute saliency maps for every protein and every MF-GO term/EC number.", action="store_true")
    parser.add_argument('--annot_file', type=str, help='The annotation file.')
    parser.add_argument('--fasta_file', type=str, default='', help='The fasta file for all test sequences')
    parser.add_argument('--split_file', type=str, default='', help='The split file for all test sequences')
    parser.add_argument('--cutoff', type=int, default=95, choices=[30, 40, 50, 70, 95], help='Sequence identity cutoff')
    args = parser.parse_args()

    # Select the CNN (sequence input) or GCN (structure input) model family
    # based on which input arguments were supplied.
    with open(args.model_config) as json_file:
        params = json.load(json_file)
    if args.seq is not None or args.fasta_fn is not None:
        params = params['cnn']
    elif args.cmap is not None or args.pdb_fn is not None or args.cmap_csv is not None or args.pdb_dir is not None:
        params = params['gcn']
    # NOTE(review): if neither branch matches, `params` keeps the full config
    # and the lookups below will misbehave -- confirm intended CLI usage.
    # This 'gcn' key is a field inside the selected model config (presumably a
    # flag passed to Predictor), distinct from the top-level 'gcn' section.
    gcn = params['gcn']
    layer_name = params['layer_name']
    models = params['models']

    # Ground-truth labels are read for the first requested ontology only.
    labels = get_all_labels(args.annot_file, ont=args.ontology[0])
    if not args.fasta_file == '':
        seq_dict = get_seq_dict(args.fasta_file, split_file=args.split_file, cutoff=args.cutoff)
    else:
        seq_dict = None

    for ont in args.ontology:
        predictor = Predictor(models[ont], gcn=gcn)
        # Run prediction for whichever input mode(s) were provided.
        if args.seq is not None:
            predictor.predict(args.seq)
        if args.cmap is not None:
            predictor.predict(args.cmap)
        if args.pdb_fn is not None:
            predictor.predict(args.pdb_fn)
        if args.fasta_fn is not None:
            predictor.predict_from_fasta(args.fasta_fn, labels, split_file=args.split_file, cutoff=args.cutoff)
        if args.cmap_csv is not None:
            predictor.predict_from_catalogue(args.cmap_csv)
        if args.pdb_dir is not None:
            predictor.predict_from_PDB_dir(args.pdb_dir, labels, seq_dict=seq_dict)

        # save predictions
        # predictor.export_csv(args.output_fn_prefix + "_" + ont.upper() + "_predictions.csv", args.verbose)
        # predictor.save_predictions(args.output_fn_prefix + "_" + ont.upper() + "_pred_scores.json")
        #
        # # save saliency maps
        # if args.saliency and ont in ['mf', 'ec']:
        #     predictor.compute_GradCAM(layer_name=layer_name, use_guided_grads=args.use_guided_grads)
        #     predictor.save_GradCAM(args.output_fn_prefix + "_" + ont.upper() + "_saliency_maps.json")
|
"""
The ``serve`` subcommand launches a server
that exposes trained models via a REST API,
and that includes a web interface for exploring
their predictions.
.. code-block:: bash
$ python -m allennlp.run serve --help
usage: run [command] serve [-h] [--port PORT] [--workers WORKERS]
[--config-file CONFIG_FILE]
Run the web service, which provides an HTTP API as well as a web demo.
optional arguments:
-h, --help show this help message and exit
--port PORT
--workers WORKERS
--config-file CONFIG_FILE
path to a JSON file specifying the configuration for
the models
"""
import argparse
from typing import Dict
from allennlp.service import server_sanic
def add_subparser(parser: argparse._SubParsersAction,
                  trained_models: Dict[str, str]) -> argparse.ArgumentParser:
    # pylint: disable=protected-access
    """Register the ``serve`` subcommand and its CLI options on *parser*."""
    description = '''Run the web service, which provides an HTTP API as well as a web demo.'''
    subparser = parser.add_parser(
        'serve', description=description, help='Run the web service and demo.')
    # Both options are plain integers with sensible defaults.
    for flag, default in (('--port', 8000), ('--workers', 1)):
        subparser.add_argument(flag, type=int, default=default)
    subparser.set_defaults(func=serve(trained_models))
    return subparser
def serve(trained_models: Dict[str, str]):
    """Build the ``serve`` entry point with *trained_models* bound into it."""
    def serve_inner(args: argparse.Namespace) -> None:
        # Delegate to the sanic server with the parsed CLI options.
        server_sanic.run(args.port, args.workers, trained_models)
    return serve_inner
|
#··················································································#
#··················································································#
# How to run #
# python3 ProtList.py -i ./ScopDatabaseFile.txt -o ScopeNewList.txt #
#··················································································#
#··················································································#
#··················································································#
#··················································································#
# Modules #
#··················································································#
#··················································································#
import re
import os
import argparse
#··················································································#
#··················································································#
# Arguments #
#··················································································#
#··················································································#
# Arguments that contain the paths to the input and output files
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--inputfile", help="Path to the file that contains the SCOP database proteins")
parser.add_argument("-o", "--outputfile", help="Path to the file that will contain the protein list")
args = parser.parse_args()
# Get values from the arguments
InputFile = args.inputfile
OutputFile = args.outputfile
#··················································································#
#··················································································#
#                                    Main Code                                     #
#··················································································#
#··················································································#
# Match protein headers of the form ">name class (X:)" from the SCOP database file
Pattern = re.compile(r'>.+ \(\w:\)')
Count = 0
# Context managers guarantee both files are closed even if an error occurs
with open(InputFile, "r") as File, open(OutputFile, "w") as Result:
    for line in File:
        if line.startswith(">"):
            try:
                # Once the program finds a protein, save its id and class into the list file
                Match = Pattern.findall(line)
                Protein = Match[0].replace(">", "")
                Protein = Protein.split(" ")
                Protein = Protein[0] + " " + Protein[1] + "\n"
                Result.write(Protein)
                Count += 1
            except IndexError:
                # Header did not match the expected format; skip it
                # (the previous bare `except:` hid every kind of error)
                continue
# Show how many proteins the script found
print("The script successfully found {} proteins".format(Count))
|
"""Code for flask mongodb extension in scout"""
import os
from scout.adapter.client import get_connection
class MongoDB:
    """Flask interface to mongodb.
    Environment variables take precedence over the Flask config; the
    config value (or the shown default) is the fallback.
    """
    @staticmethod
    def init_app(app):
        """Initialize from flask: attach a client and database handle to app.config."""
        db_name = os.environ.get("MONGO_DBNAME") or app.config.get("MONGO_DBNAME", "scout")
        # NOTE(review): os.environ values are strings, so MONGO_PORT from the
        # environment is passed as a str while the config default is int 27017
        # — presumably get_connection normalizes this. TODO confirm.
        client = get_connection(
            host=os.environ.get("MONGO_HOST") or app.config.get("MONGO_HOST", "localhost"),
            port=os.environ.get("MONGO_PORT") or app.config.get("MONGO_PORT", 27017),
            username=os.environ.get("MONGO_USERNAME") or app.config.get("MONGO_USERNAME", None),
            password=os.environ.get("MONGO_PASSWORD") or app.config.get("MONGO_PASSWORD", None),
            uri=os.environ.get("MONGO_URI") or app.config.get("MONGO_URI", None),
            mongodb=db_name,
        )
        # Expose the database handle and the raw client for the rest of the app.
        app.config["MONGO_DATABASE"] = client[db_name]
        app.config["MONGO_CLIENT"] = client
    def __repr__(self):
        # Just the class name, e.g. "MongoDB".
        return f"{self.__class__.__name__}"
|
from enum import Enum
class FieldState(Enum):
    """State of a single game-field cell; the value is its display character."""
    NONE = " "      # empty cell
    BOT = "O"       # cell taken by the bot
    PLAYER = "X"    # cell taken by the player
|
#!/usr/bin/env python
import asyncio
import json
import threading
import time
import websockets
# Shared queue bridging the producer and consumer coroutines.
queue = asyncio.Queue()
# Spare event loop; NOTE(review): only used via start_loop, which is not
# called anywhere in this script — confirm whether it is still needed.
new_loop = asyncio.new_event_loop()
def start_loop(loop):
    """Install *loop* as this thread's event loop and block running it forever."""
    asyncio.set_event_loop(loop)
    loop.run_forever()
async def producer():
    """Emit the integers 0..4 onto the shared queue, one per second."""
    count = 0
    while count < 5:
        await asyncio.sleep(1)
        queue.put_nowait(count)
        print(f'put {count}')
        count += 1
async def consumer(websocket):
    """Forever forward queue items to *websocket*, tagged with a server timestamp."""
    print('server starts to wait producer...')
    while True:
        item = await queue.get()
        print(f'get {item}')
        stamp = str(time.time())
        await websocket.send(json.dumps((item, stamp)))
async def echo(websocket, path):
    """Handle one client: wait for its first message, then run producer/consumer."""
    # The initial message acts only as a handshake; its content is unused.
    name = await websocket.recv()
    tasks = [producer(), consumer(websocket)]
    # BUG FIX: gather() was not awaited, so the handler returned immediately
    # and the connection was closed while the tasks were still running.
    await asyncio.gather(*tasks, return_exceptions=True)
# Register the handler and start serving websocket connections on localhost:8888.
start_server = websockets.serve(echo, 'localhost', 8888)
asyncio.get_event_loop().run_until_complete(start_server)
# Keep the event loop running so the server continues accepting clients.
asyncio.get_event_loop().run_forever()
|
# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as lib_constants
from neutron.objects import address_scope
from neutron.tests.unit.objects import test_base
from neutron.tests.unit.objects import test_rbac
from neutron.tests.unit import testlib_api
class AddressScopeIfaceObjectTestCase(test_base.BaseObjectIfaceTestCase):
    # Run the standard object-interface test suite against AddressScope.
    _test_class = address_scope.AddressScope
class AddressScopeDbObjectTestCase(test_base.BaseDbObjectTestCase,
                                   testlib_api.SqlTestCase):
    # Run the standard DB-backed test suite against AddressScope.
    _test_class = address_scope.AddressScope
class AddressScopeRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
                                       test_base.BaseDbObjectTestCase,
                                       testlib_api.SqlTestCase):
    # DB-backed tests for the RBAC entries of address scopes.
    _test_class = address_scope.AddressScopeRBAC
    def setUp(self):
        super(AddressScopeRBACDbObjectTestCase, self).setUp()
        # Each RBAC fixture references an AddressScope by object_id; create
        # those parent scopes up front so the RBAC rows can be persisted.
        for obj in self.db_objs:
            as_obj = address_scope.AddressScope(
                self.context,
                id=obj['object_id'],
                name="test_as_%s_%s" % (obj['object_id'], obj['project_id']),
                project_id=obj['project_id'],
                ip_version=lib_constants.IP_ALLOWED_VERSIONS[0],
            )
            as_obj.create()
    def _create_test_address_scope_rbac(self):
        # Persist and return the first prepared RBAC object.
        self.objs[0].create()
        return self.objs[0]
class AddressScopeRBACIfaceObjectTestCase(test_rbac.TestRBACObjectMixin,
                                          test_base.BaseObjectIfaceTestCase):
    # Object-interface tests for the RBAC entries of address scopes.
    _test_class = address_scope.AddressScopeRBAC
|
from rpython.rlib import jit
from . import oop, pretty
class Cont(pretty.PrettyBase):
    """Abstract base class for interpreter continuations."""
    _immutable_ = True
    def to_pretty(self):
        return pretty.atom('#<Cont>')
    # Not necessarily safe to call this directly.
    def plug_reduce(self, w_value, env):
        # Feed *w_value* into this continuation; subclasses implement the
        # actual state transition.
        assert isinstance(w_value, oop.W_Value)
        raise NotImplementedError('Cont.plug_reduce: abstract method')
class Halt(Cont):
    """Terminal continuation: receiving a value ends interpretation."""
    _immutable_ = True
    def to_pretty(self):
        return pretty.atom('#halt')
    def plug_reduce(self, w_value, env):
        # Unwind the interpreter loop, carrying the final value.
        from .interp import HaltException
        raise HaltException(w_value)
# From pycket. This helps to avoid stack overflow for ReturnCont.
def label0(func, enter):
    """Wrap *func* in a (Label, ValueCont) trampoline.
    Instead of calling *func* directly (growing the native stack), callers
    get back ``wraps``, which stores the pending value/continuation in a
    ValueCont and hands a Label expression back to the interpreter loop;
    *enter* marks the Label as a JIT entry point via should_enter.
    """
    from .ast import Expr
    func = jit.unroll_safe(func)
    class Label(Expr):
        _immutable_ = True
        should_enter = enter
        def evaluate(self, env, cont):
            # Unpack the saved value and continuation, then resume func.
            assert isinstance(cont, ValueCont)
            w_value = cont._w_value
            prev = cont._cont
            return func(prev, w_value, env)
        def to_pretty(self):
            return pretty.atom('#label').append_kw('name', func.func_name) \
                .append_kw('module', func.__module__) \
                .append_kw('line', func.__code__.co_firstlineno)
    class ValueCont(Cont):
        # Holds the value/continuation while control returns to the loop.
        _immutable_ = True
        def __init__(self, w_value, cont):
            self._w_value = w_value
            self._cont = cont
    # One shared Label per wrapped func; state travels in the ValueCont.
    label_instance = Label()
    def wraps(cont, w_value, env):
        return label_instance, env, ValueCont(w_value, cont)
    return wraps
def label(func):
    """Wrap *func* as a non-entering trampoline label."""
    return label0(func, enter=False)
def loop_label(func):
    """Wrap *func* as a trampoline label that is also a JIT entry point."""
    return label0(func, enter=True)
|
# A part of esc2pdf (https://github.com/szihlmann/esc2pdf)
# Copyright (C) 2021 Serge Zihlmann, Bern, Switzerland
# MIT license -- See LICENSE.txt for details
class State(object):
    """Base class for states; provides utility methods shared by all states."""

    def __init__(self):
        pass

    def on_byte(self, byte, Flowable):
        """Handle one delegated input byte; by default stay in this state."""
        return self

    def Name(self):
        """Return the name of this state (its class name)."""
        return self.__class__.__name__

    def isState(self, stateName='None'):
        """Return True when this state's name equals *stateName* (a string)."""
        return self.__class__.__name__ == stateName
|
# Data science project config file
import os
# Project name
PROJECT_NAME = 'kaggle-talkingdata2'
# Paths
# NOTE(review): DATA_BASE_PATH must be set in the environment; if it is
# missing, os.getenv returns None and expanduser raises TypeError.
DATA_BASE_PATH = os.path.expanduser(os.getenv('DATA_BASE_PATH'))
DATA_PATH = os.path.join(DATA_BASE_PATH, PROJECT_NAME)
# Comet experiment-tracking client settings.
COMET = {
    'api_key': os.getenv('COMET_API_KEY'),
    'project_name': PROJECT_NAME,
    'auto_param_logging': True,
    'auto_metric_logging': False,
    'parse_args': False
}
|
import pyowm
import json
import requests
from pyowm import timeutils, exceptions
from telegram import Message, Chat, Update, Bot, InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import run_async
from emilia import dispatcher, updater, API_WEATHER, API_ACCUWEATHER, spamcheck
from emilia.modules.disable import DisableAbleCommandHandler
from emilia.modules.languages import tl
from emilia.modules.helper_funcs.alternate import send_message
@run_async
@spamcheck
def cuaca(update, context):
    """Reply with the current OpenWeatherMap conditions for the given location.
    The location comes from the command arguments; replies are localized
    via ``tl`` and sent back with ``send_message``.
    """
    args = context.args
    location = " ".join(args)
    if location.lower() == context.bot.first_name.lower():
        send_message(update.effective_message, tl(update.effective_message, "Saya akan terus mengawasi di saat senang maupun sedih!"))
        # NOTE(review): BAN_STICKER is neither defined nor imported in this
        # module, so this call would raise NameError — confirm its source.
        context.bot.send_sticker(update.effective_chat.id, BAN_STICKER)
        return
    try:
        owm = pyowm.OWM(API_WEATHER, language='id')
        # One lookup is enough; the previous code queried the same place a
        # second time and also fetched a 3-hour forecast that was never used.
        observation = owm.weather_at_place(location)
        cuacanya = observation.get_weather()
        lokasinya = observation.get_location().get_name()
        temperatur = cuacanya.get_temperature(unit='celsius')['temp']
        # Pick an emoji for the current OWM weather-condition code.
        statusnya = ""
        cuacaskrg = cuacanya.get_weather_code()
        if cuacaskrg < 232:  # thunderstorm
            statusnya += "⛈️ "
        elif cuacaskrg < 321:  # drizzle
            statusnya += "🌧️ "
        elif cuacaskrg < 504:  # light rain
            statusnya += "🌦️ "
        elif cuacaskrg < 531:  # heavy rain
            statusnya += "⛈️ "
        elif cuacaskrg < 622:  # snow
            statusnya += "🌨️ "
        elif cuacaskrg < 781:  # atmosphere (mist, tornado, ...)
            statusnya += "🌪️ "
        elif cuacaskrg < 800:  # clear
            statusnya += "🌤️ "
        elif cuacaskrg < 801:  # few clouds
            statusnya += "⛅️ "
        elif cuacaskrg < 804:  # cloudy
            statusnya += "☁️ "
        statusnya += cuacanya._detailed_status
        # BUG FIX: removed `cuacabsk = besok.get_weather_code()` — `besok`
        # was never defined, so every successful lookup crashed (NameError).
        send_message(update.effective_message,
                     tl(update.effective_message, "{} hari ini sedang {}, sekitar {}°C.\n").format(
                         lokasinya, statusnya, temperatur))
    except pyowm.exceptions.api_call_error.APICallError:
        send_message(update.effective_message, tl(update.effective_message, "Tulis lokasi untuk mengecek cuacanya"))
    except pyowm.exceptions.api_response_error.NotFoundError:
        send_message(update.effective_message, tl(update.effective_message, "Maaf, lokasi tidak ditemukan 😞"))
    else:
        return
def _accu_icon(weather_icon):
    """Map an AccuWeather icon number (1-44) to an emoji.
    Thresholds are checked in ascending order; the original descending
    chain was dead code because its first test (<= 44) matched every icon.
    """
    if weather_icon <= 5:
        return "☀️"
    elif weather_icon <= 8:
        return "⛅️"
    elif weather_icon <= 11:
        return "🌫"
    elif weather_icon <= 12:
        return "🌧"
    elif weather_icon <= 14:
        return "🌦"
    elif weather_icon <= 15:
        return "⛈"
    elif weather_icon <= 17:
        return "🌦"
    elif weather_icon <= 18:
        return "🌨"
    elif weather_icon <= 19:
        return "☁"
    elif weather_icon <= 23:
        return "🌥"
    elif weather_icon <= 24:
        return "❄"
    elif weather_icon <= 29:
        return "☃"
    elif weather_icon <= 30:
        return "🌡"
    elif weather_icon <= 31:
        return "⛄"
    elif weather_icon <= 32:
        return "🌬"
    elif weather_icon <= 33:
        return "🌑"
    elif weather_icon <= 36:
        return "⛅"
    elif weather_icon <= 38:
        return "☁"
    elif weather_icon <= 40:
        return "🌧"
    elif weather_icon <= 42:
        return "⛈"
    elif weather_icon <= 44:
        return "☁"
    return ""
@run_async
@spamcheck
def accuweather(update, context):
    """Reply with the current AccuWeather conditions for the requested location."""
    args = context.args
    if not args:
        return send_message(update.effective_message, tl(update.effective_message, "Masukan nama lokasinya untuk mengecek cuacanya!"))
    location = " ".join(args)
    if location.lower() == context.bot.first_name.lower():
        send_message(update.effective_message, tl(update.effective_message, "Saya akan terus mengawasi di saat senang maupun sedih!"))
        # NOTE(review): BAN_STICKER is not defined in this module — confirm its source.
        context.bot.send_sticker(update.effective_chat.id, BAN_STICKER)
        return
    # Resolve the location name to an AccuWeather location key.
    url = "http://api.accuweather.com/locations/v1/cities/search.json?q={}&apikey={}".format(location, API_ACCUWEATHER)
    headers = {'Content-type': 'application/json'}
    r = requests.get(url, headers=headers)
    try:
        data = r.json()[0]
    except (IndexError, KeyError, ValueError):
        # Empty result list or a non-JSON / error payload: treat as not found
        # (the previous bare `except:` also swallowed programming errors).
        return send_message(update.effective_message, tl(update.effective_message, "Maaf, lokasi tidak ditemukan 😞"))
    locid = data.get('Key')
    weatherlang = tl(update.effective_message, "weather_lang")
    # Fetch the current conditions for the resolved location key.
    urls = "http://api.accuweather.com/currentconditions/v1/{}.json?apikey={}&language={}&details=true&getphotos=true".format(locid, API_ACCUWEATHER, weatherlang)
    rs = requests.get(urls, headers=headers)
    datas = rs.json()[0]
    # BUG FIX: the old descending if/elif chain started with `<= 44`, which
    # matched every icon value, making every other branch unreachable.
    icweather = _accu_icon(datas.get('WeatherIcon'))
    cuaca = "*{} {}*\n".format(icweather, datas.get('WeatherText'))
    cuaca += tl(update.effective_message, "*Suhu:* `{}°C`/`{}°F`\n").format(datas.get('Temperature').get('Metric').get('Value'), datas.get('Temperature').get('Imperial').get('Value'))
    cuaca += tl(update.effective_message, "*Kelembapan:* `{}`\n").format(datas.get('RelativeHumidity'))
    direct = "{}".format(datas.get('Wind').get('Direction').get('English'))
    direct = direct.replace("N", "↑").replace("E", "→").replace("S", "↓").replace("W", "←")
    cuaca += tl(update.effective_message, "*Angin:* `{} {} km/h` | `{} mi/h`\n").format(direct, datas.get('Wind').get('Speed').get('Metric').get('Value'), datas.get('Wind').get('Speed').get('Imperial').get('Value'))
    cuaca += tl(update.effective_message, "*Tingkat UV:* `{}`\n").format(datas.get('UVIndexText'))
    cuaca += tl(update.effective_message, "*Tekanan:* `{}` (`{} mb`)\n").format(datas.get('PressureTendency').get('LocalizedText'), datas.get('Pressure').get('Metric').get('Value'))
    # Build "city, admin areas..., country" for the footer.
    lok = [data.get('LocalizedName'), data.get('AdministrativeArea').get('LocalizedName')]
    for x in reversed(range(len(data.get('SupplementalAdminAreas')))):
        lok.append(data.get('SupplementalAdminAreas')[x].get('LocalizedName'))
    lok.append(data.get('Country').get('LocalizedName'))
    teks = tl(update.effective_message, "*Cuaca di {} saat ini*\n").format(data.get('LocalizedName'))
    teks += "{}\n".format(cuaca)
    teks += tl(update.effective_message, "*Lokasi:* `{}`\n\n").format(", ".join(lok))
    send_message(update.effective_message, teks, parse_mode="markdown", disable_web_page_preview=True, reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(text="More info", url=datas.get('Link'))]]))
__help__ = "weather_help"
__mod_name__ = "Weather"
# NOTE(review): both /cuaca and /weather are wired to accuweather, not to the
# cuaca (OpenWeatherMap) handler above — confirm this is intentional.
CUACA_HANDLER = DisableAbleCommandHandler(["cuaca", "weather"], accuweather, pass_args=True)
# ACCUWEATHER_HANDLER = DisableAbleCommandHandler("accuweather", accuweather, pass_args=True)
dispatcher.add_handler(CUACA_HANDLER)
# dispatcher.add_handler(ACCUWEATHER_HANDLER)
|
from django.shortcuts import redirect
def admin_check(user):
    """Return True only for active, authenticated users flagged as admins."""
    return bool(user.is_active and user.is_authenticated and user.is_user_admin)
def admin_required(function=None, redirect_field_name=None, login_url='/login/'):
    """
    Decorator for views that checks that the user is logged in
    and has the "admin" permission set.
    Usable both as ``@admin_required`` and ``@admin_required(login_url=...)``;
    non-admin requests are redirected to *login_url*.
    ``redirect_field_name`` is accepted for API compatibility but unused.
    """
    from functools import wraps

    def _decorated(view_func):
        # wraps preserves __name__/__doc__/__dict__ (as the old manual
        # copying did) plus __module__, __qualname__ and __wrapped__.
        @wraps(view_func)
        def _view(request, *args, **kwargs):
            if admin_check(request.user):
                return view_func(request, *args, **kwargs)
            # Not admin: bounce to the login page.
            return redirect(login_url)
        return _view
    if function is None:
        return _decorated
    else:
        return _decorated(function)
|
"""
demo04_bin.py 二值化
"""
import numpy as np
import sklearn.preprocessing as sp
# Sample matrix: three rows of three numeric features each.
samples = np.array([[17., 100., 4000.],
                    [20., 80., 5000.],
                    [23., 60., 5500.]])
# Binarize with sklearn: values > 80 become 1, values <= 80 become 0.
# Renamed from `bin`, which shadowed the builtin of the same name.
binarizer = sp.Binarizer(threshold=80)
r_samples = binarizer.transform(samples)
print(r_samples)
# Same binarization done manually with boolean-mask assignment.
samples[samples <= 80] = 0
samples[samples > 80] = 1
print(samples)
|
def getStrandPairMethylation(methylationLocationsOneFileName, methylationLocationsTwoFileName, outputFileName):
    # Gets the locations of the methylated bases that are in both strands
    # Also determines the fraction of methylated bases in each strand that are methylated in the other strand
    # ASSUMES THAT LOCATIONS IN FILE TWO CORRESPOND TO LOCATIONS IN FILE ONE + 1 (usually: file one is OT, two is OB)
    # ASSUMES THAT BOTH FILES ARE SORTED BY CHROM, BASE AND HAVE NO REPEATS
    # NOTE: input lines are assumed to be tab-separated with [chrom, position, ...].
    methylationLocationsOneFile = open(methylationLocationsOneFileName)
    methylationLocationsTwoFile = open(methylationLocationsTwoFileName)
    outputFile = open(outputFileName, 'w+')
    # Running totals: sites seen in each file, and sites present in both.
    methylationOneCount = 0
    methylationOverlapCount = 0
    methylationTwoCount = 0
    lineTwo = methylationLocationsTwoFile.readline()
    lineTwoElements = lineTwo.split("\t")
    fileTwoDone = False
    for line in methylationLocationsOneFile:
        # Iterate through locations in the first file and find those that correspond to locations in the second file
        methylationOneCount = methylationOneCount + 1
        if fileTwoDone == True:
            # The second file has been read to completion, so continue
            continue
        lineElements = line.split("\t")
        while lineElements[0] > lineTwoElements[0]:
            # Iterate through lines from the second file until a line with the right chromosome is reached
            methylationTwoCount = methylationTwoCount + 1
            lineTwo = methylationLocationsTwoFile.readline()
            if lineTwo == "":
                # The second file has been read to completion, so stop
                fileTwoDone = True
                break
            lineTwoElements = lineTwo.split("\t")
        if fileTwoDone == True:
            # The second file has been read to completion, so continue
            continue
        while (lineElements[0] == lineTwoElements[0]) and (int(lineElements[1].strip()) >= int(lineTwoElements[1].strip())):
            # Iterate through the lines from the second file until a line with the right base is reached
            methylationTwoCount = methylationTwoCount + 1
            lineTwo = methylationLocationsTwoFile.readline()
            if lineTwo == "":
                # The second file has been read to completion, so stop
                fileTwoDone = True
                break
            lineTwoElements = lineTwo.split("\t")
        if fileTwoDone == True:
            # The second file has been read to completion, so continue
            continue
        if (lineElements[0] == lineTwoElements[0]) and (int(lineElements[1].strip()) + 1 == int(lineTwoElements[1].strip())):
            # Opposite-strand partner found: file-two position == file-one position + 1
            methylationOverlapCount = methylationOverlapCount + 1
            outputFile.write(lineElements[0] + "\t" + lineElements[1].strip() + "\n")
    if fileTwoDone == False:
        # Increment counts for the second file based on how many lines remain
        methylationTwoCount = methylationTwoCount + 1 # The latest line has not yet been counted
        methylationTwoLines = methylationLocationsTwoFile.readlines()
        methylationTwoCount = methylationTwoCount + len(methylationTwoLines)
    # Fraction of each strand's methylated sites with a partner on the other strand (Python 2 print statements)
    print float(methylationOverlapCount)/float(methylationOneCount)
    print float(methylationOverlapCount)/float(methylationTwoCount)
    methylationLocationsOneFile.close()
    methylationLocationsTwoFile.close()
    outputFile.close()
if __name__=="__main__":
import sys
methylationLocationsOneFileName = sys.argv[1]
methylationLocationsTwoFileName = sys.argv[2]
outputFileName = sys.argv[3]
getStrandPairMethylation(methylationLocationsOneFileName, methylationLocationsTwoFileName, outputFileName)
|
"""
Module to write truth catalogs for AGNs using the AGNs parameters db as input.
"""
import os
import sys
import json
import logging
import sqlite3
import numpy as np
import pandas as pd
from lsst.sims.photUtils import PhotometricParameters
from lsst.sims.utils import angularSeparation
from .synthetic_photometry import SyntheticPhotometry, find_sed_file
from .write_sqlite import write_sqlite
__all__ = ['AGNTruthWriter', 'agn_mag_norms', 'write_agn_variability_truth']
logging.basicConfig(format="%(asctime)s %(name)s: %(message)s",
stream=sys.stdout)
def agn_mag_norms(mjds, redshift, tau, sf, seed, start_date=58580.):
    """
    Return the delta mag_norm values wrt the infinite-time average
    mag_norm for the provided AGN light curve parameters. mag_norm is
    the object's un-reddened monochromatic magnitude at 500nm.
    Parameters
    ----------
    mjds: np.array
        Times at which to evaluate the light curve delta flux values in MJD.
        Observer frame.  (Any numeric sequence is accepted.)
    redshift: float
        Redshift of the AGN, used to account for time-dilation between
        rest frame and observer frame.
    tau: float
        Variability time scale in days.
    sf: float
        Structure function parameter, i.e., asymptotic rms variability on
        long time scales.
    seed: int
        Random number seed.
    start_date: float [58580.]
        Start date for the random walk in MJD. This will ensure that the
        same random walk is generated for a given redshift, tau, sf,
        and seed regardless of the mjds requested.
    Returns
    -------
    np.array of delta mag_norm values.
    Notes
    -----
    This code is stolen from
    https://github.com/astroML/astroML/blob/master/astroML/time_series/generate.py
    """
    if min(mjds) < start_date:
        raise RuntimeError(f'mjds must start after {start_date}')
    # Daily grid from the fixed start date through the last requested time.
    # (np.max(mjds) + 1 is equivalent to the old max(mjds + 1) for arrays,
    # but also works when mjds is a plain Python sequence.)
    t_obs = np.arange(start_date, np.max(mjds) + 1, dtype=float)
    # Rest-frame time, in units of the damping time scale tau.
    t_rest = t_obs/(1 + redshift)/tau
    rng = np.random.RandomState(seed)
    nbins = len(t_rest)
    steps = rng.normal(0, 1, nbins)
    delta_mag_norm = np.zeros(nbins)
    delta_mag_norm[0] = steps[0]*sf
    # Damped random walk: each value decays toward zero and picks up
    # Gaussian noise scaled by the structure function.
    for i in range(1, nbins):
        dt = t_rest[i] - t_rest[i - 1]
        delta_mag_norm[i] \
            = delta_mag_norm[i - 1]*(1. - dt) + np.sqrt(2*dt)*sf*steps[i]
    # Interpolate the daily walk onto the requested observation times.
    return np.interp(mjds, t_obs, delta_mag_norm)
class AGNTruthWriter:
    '''
    Write Summary and Variable truth tables for unlensed AGNs.
    '''
    # Type code folded into object IDs (see object_id below).
    agn_type_id = 117
    def __init__(self, outfile, agn_db_file,
                 ddf_bounds=(52.479, 53.771, -28.667, -27.533)):
        '''
        Parameters
        ----------
        outfile: str
            Name of the sqlite3 file to contain the truth tables.
        agn_db_file: str
            The sqlite3 file containing the AGN model parameters.
        ddf_bounds: 4-tuple [(52.479, 53.771, -28.667, -27.533)]
            Bounds of DDF region in degrees.
        '''
        self.outfile = outfile
        if not os.path.isfile(agn_db_file):
            raise FileNotFoundError(f'{agn_db_file} not found.')
        self.conn = sqlite3.connect(agn_db_file)
        ra_min, ra_max, dec_min, dec_max = ddf_bounds
        # Restrict to AGNs inside the DDF sky-region bounds.
        self.query = f'''select galaxy_id, magNorm, redshift, M_i, ra, dec,
                         varParamStr from agn_params where {ra_min} <= ra
                         and ra <= {ra_max} and {dec_min} <= dec
                         and dec <= {dec_max} '''
        curs = self.conn.execute(self.query)
        # Column name -> index map used to unpack rows in the writers below.
        self.icol = {_[0]: icol for icol, _ in enumerate(curs.description)}
    @staticmethod
    def object_id(galaxy_id):
        """Return the AGN object ID based on the host galaxy ID"""
        return str(galaxy_id*1024 + AGNTruthWriter.agn_type_id)
    def write(self, chunk_size=10000, verbose=False):
        '''
        Extract the column data from the agn db file and write
        the summary truth table to the sqlite file.
        Parameters
        ----------
        chunk_size: int [10000]
            Number of records to read in at a time from the star db
            file and write to the output file.
        verbose: bool [False]
            Flag to write the number of records that have been processed.
        '''
        logger = logging.getLogger('AGNTruthWriter.write')
        if verbose:
            logger.setLevel(logging.INFO)
        bands = 'ugrizy'
        curs = self.conn.execute(self.query)
        irec = 0
        while True:
            # Per-chunk column accumulators.
            ids, galaxy_ids, ra, dec, redshift = [], [], [], [], []
            is_variable, is_pointsource = [], []
            flux_by_band_MW = {_: [] for _ in bands}
            flux_by_band_noMW = {_: [] for _ in bands}
            chunk = curs.fetchmany(chunk_size)
            if not chunk:
                break
            logger.info('%d', irec)
            for row in chunk:
                irec += 1
                # All AGNs are variable point sources:
                is_pointsource.append(1)
                is_variable.append(1)
                # AGN-dependent entries:
                ra.append(row[self.icol['ra']])
                dec.append(row[self.icol['dec']])
                redshift.append(row[self.icol['redshift']])
                ids.append(self.object_id(row[self.icol['galaxy_id']]))
                galaxy_ids.append(row[self.icol['galaxy_id']])
                sed_file = find_sed_file('agnSED/agn.spec.gz')
                # Create SyntheticPhotometry object initially without
                # Milky Way dust parameters.
                synth_phot = SyntheticPhotometry(sed_file,
                                                 row[self.icol['magNorm']],
                                                 redshift[-1])
                for band in bands:
                    flux_by_band_noMW[band].append(synth_phot.calcFlux(band))
                # Set Milky Way dust parameters and compute ugrizy fluxes.
                synth_phot.add_MW_dust(ra[-1], dec[-1], Rv=3.1)
                for band in bands:
                    flux_by_band_MW[band].append(synth_phot.calcFlux(band))
            # Append this chunk's rows to the truth_summary table.
            write_sqlite(self.outfile,
                         ids=ids,
                         galaxy_ids=galaxy_ids,
                         ra=ra,
                         dec=dec,
                         redshift=redshift,
                         is_variable=is_variable,
                         is_pointsource=is_pointsource,
                         flux_by_band_MW=flux_by_band_MW,
                         flux_by_band_noMW=flux_by_band_noMW,
                         good_ixes=range(len(ids)),
                         create_index=False)
        # Index (ra, dec) once all rows are written, to speed up sky queries.
        with sqlite3.connect(self.outfile) as conn:
            conn.cursor().execute('create index radec_ix on '
                                  'truth_summary(ra,dec)')
            conn.commit()
    def write_auxiliary_truth(self, chunk_size=10000, verbose=False):
        """
        Write the AGN auxiliary truth table from the AGN db file.
        Parameters
        ----------
        chunk_size: int [10000]
            Number of records to read in at a time from the star db
            file and write to the output file.
        verbose: bool [False]
            Flag to write the number of records that have been processed.
        """
        logger = logging.getLogger('AGNTruthWriter.write_auxiliary_truth')
        if verbose:
            logger.setLevel(logging.INFO)
        bands = 'ugrizy'
        curs = self.conn.execute(self.query)
        table_name = 'agn_auxiliary_info'
        cmd = f'''CREATE TABLE IF NOT EXISTS {table_name}
        (id TEXT, host_galaxy BIGINT, M_i DOUBLE, seed BIGINT,
         tau_u DOUBLE, tau_g DOUBLE, tau_r DOUBLE,
         tau_i DOUBLE, tau_z DOUBLE, tau_y DOUBLE,
         sf_u DOUBLE, sf_g DOUBLE, sf_r DOUBLE,
         sf_i DOUBLE, sf_z DOUBLE, sf_y DOUBLE)'''
        with sqlite3.connect(self.outfile) as conn:
            cursor = conn.cursor()
            cursor.execute(cmd)
            conn.commit()
            irec = 0
            while True:
                chunk = curs.fetchmany(chunk_size)
                if not chunk:
                    break
                values = []
                logger.info('%d', irec)
                for row in chunk:
                    irec += 1
                    # Per-band variability parameters are stored as JSON in
                    # the varParamStr column under the 'p' key.
                    pars = json.loads(row[self.icol['varParamStr']])['p']
                    my_row = [self.object_id(row[self.icol['galaxy_id']]),
                              row[self.icol['galaxy_id']],
                              row[self.icol['M_i']],
                              pars['seed']]
                    my_row.extend([pars[f'agn_tau_{band}'] for band in bands])
                    my_row.extend([pars[f'agn_sf_{band}'] for band in bands])
                    values.append(my_row)
                cursor.executemany(f'''INSERT INTO {table_name} VALUES
                                   (?, ?, ?, ?,
                                    ?, ?, ?, ?, ?, ?,
                                    ?, ?, ?, ?, ?, ?)''', values)
                conn.commit()
def write_agn_variability_truth(agn_db_file, query, opsim_db_file,
                                start_mjd=59580.,
                                end_mjd=61405, fp_radius=2.05,
                                object_range=None, outfile=None,
                                verbose=False):
    """
    Write the AGN fluxes for each visit.
    Parameters
    ----------
    agn_db_file: str
        File containing the AGN model parameters.
    query: str
        Query string for the AGN parameters from agn_db_file.
    opsim_db_file: str
        The sqlite3 file containing the OpSim Summary table which
        has the pointing information for each visit.
    start_mjd: float [59580.]
        Starting MJD for the visits to be used from the opsim db file.
        The default is the start date of the minion 1016 db file.
    end_mjd: float [61405.]
        Ending MJD for the visits to be used from the opsim db file.
        The default is the end of 5 years from the start date of the
        minion 1016 db file.
    fp_radius: float [2.05]
        Effective radius of the focal plane in degrees. This defines
        the acceptance cone centered on the pointing direction for
        determining if an object is being observed by LSST for the
        purpose of computing a flux entry for the visit to be entered
        in the Variability Truth Table.
    object_range: (int, int) [None]
        The range of objects to process. This is useful for
        testing. If None, then write all entries for all AGNs in
        the agn_db_file.
    outfile: str [None]
        Output file for the `agn_variability_truth` table. If None,
        then 'agn_variability_truth_cat.db' will be used.
    verbose: bool [False]
        Flag to emit per-object progress messages via the module logger.
    """
    logger = logging.getLogger('write_agn_variability_truth')
    if verbose:
        logger.setLevel(logging.INFO)
    bands = 'ugrizy'
    # Retrieve the pointing information for each visit from the opsim db.
    with sqlite3.connect(opsim_db_file) as conn:
        opsim_df = pd.read_sql(
            f'''select obsHistID, descDitheredRA, descDitheredDec, filter,
            expMJD from Summary where expMJD >= {start_mjd} and
            expMJD <= {end_mjd}''', conn)
    # Pointing directions are stored in radians; work in degrees below.
    opsim_df['ra'] = np.degrees(opsim_df['descDitheredRA'])
    opsim_df['dec'] = np.degrees(opsim_df['descDitheredDec'])
    # Read in the AGN parameters table so that ranges of rows
    # can be easily handled.
    with sqlite3.connect(agn_db_file) as con:
        agn_df = pd.read_sql(query, con)
    if object_range is None:
        object_range = (0, len(agn_df))
    # Create the Variability Truth table.
    table_name = 'agn_variability_truth'
    cmd = f'''CREATE TABLE IF NOT EXISTS {table_name}
    (id TEXT, obsHistID INTEGER, MJD FLOAT, bandpass TEXT,
    delta_flux FLOAT, num_photons FLOAT)'''
    if outfile is None:
        outfile = 'agn_variability_truth_cat.db'
    # NOTE: sqlite3's context manager commits on exit but does not close the
    # connection, so conn/cursor remain usable in the loop below.
    with sqlite3.connect(outfile) as conn:
        cursor = conn.cursor()
        cursor.execute(cmd)
        conn.commit()
    # Loop over rows in AGN db and add the flux for each
    # observation where the AGN is observed by LSST.
    sed_file = find_sed_file('agnSED/agn.spec.gz')
    for iloc in range(*object_range):
        row = agn_df.iloc[iloc]
        # Extract the AGN info and model parameters:
        object_id = AGNTruthWriter.object_id(row['galaxy_id'])
        logger.info('%d %s', iloc, object_id)
        ra = row['ra']
        dec = row['dec']
        magNorm = row['magNorm']
        redshift = row['redshift']
        params = json.loads(row['varParamStr'])['p']
        seed = params['seed']
        # Compute baseline fluxes in each band.
        synth_phot = SyntheticPhotometry(sed_file, magNorm, redshift)
        gAv, gRv = synth_phot.add_MW_dust(ra, dec)
        flux0 = {band: synth_phot.calcFlux(band) for band in bands}
        # Select the visits from the opsim db in which the AGN
        # is observed by applying cuts on the sky coordinates.
        dec_cut = f'{dec - fp_radius} <= dec <= {dec + fp_radius}'
        df = pd.DataFrame(opsim_df.query(dec_cut))
        df['ang_sep'] = angularSeparation(df['ra'].to_numpy(),
                                          df['dec'].to_numpy(), ra, dec)
        df = df.query(f'ang_sep <= {fp_radius}')
        # Compute delta fluxes for each band.
        for band in bands:
            phot_params = PhotometricParameters(nexp=1, exptime=30,
                                                gain=1, bandpass=band)
            bp = synth_phot.bp_dict[band]
            tau = params[f'agn_tau_{band}']
            sf = params[f'agn_sf_{band}']
            my_df = df.query(f'filter == "{band}"')
            if len(my_df) == 0:
                continue
            obsHistIDs = my_df['obsHistID'].to_list()
            mjds = my_df['expMJD'].to_numpy()
            # Damped-random-walk light curve sampled at the visit times.
            mag_norms = (agn_mag_norms(mjds, redshift, tau, sf, seed)
                         + magNorm)
            values = []
            for obsHistID, mjd, mag_norm in zip(obsHistIDs, mjds,
                                                mag_norms):
                synth_phot = SyntheticPhotometry(sed_file, mag_norm,
                                                 redshift=redshift,
                                                 gAv=gAv, gRv=gRv)
                delta_flux = synth_phot.calcFlux(band) - flux0[band]
                num_photons = synth_phot.sed.calcADU(bp, phot_params)
                values.append((object_id, obsHistID, mjd, band,
                               delta_flux, num_photons))
            cursor.executemany(f'''INSERT INTO {table_name} VALUES
                               (?, ?, ?, ?, ?, ?)''', values)
            conn.commit()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
import tensorflow as tf
import onnx
from onnx_tf.backend import run_node, prepare
from onnx import helper
from onnx.onnx_pb2 import TensorProto
class TestModel(unittest.TestCase):
  """Integration tests that run small ONNX graphs through the TF backend."""

  def test_relu_node_inplace(self):
    """A Relu node whose output tensor reuses the input name "X"."""
    x_val = np.random.randn(3, 2).astype(np.float32)
    expected = np.clip(x_val, 0, np.inf)
    relu_node = helper.make_node(
        "Relu", ["X"], ["X"])
    graph = helper.make_graph(
        [relu_node],
        name="test",
        inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])],
        outputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])])
    backend_rep = prepare(helper.make_model(graph))
    result = backend_rep.run({"X": x_val})
    np.testing.assert_almost_equal(result.X, expected)

  def test_initializer(self):
    """A graph whose "weight" input is supplied by a constant initializer."""
    x_val = np.array([[1, 2], [3, 4]]).astype(np.float32)
    y_val = np.array([[1, 2], [3, 4]]).astype(np.float32)
    identity = np.array([[1, 0], [0, 1]])
    nodes = [
        helper.make_node("Add", ["X", "Y"], ["Z0"]),
        helper.make_node("Cast", ["Z0"], ["Z"], to="float"),
        helper.make_node("Mul", ["Z", "weight"], ["W"]),
        helper.make_node("Tanh", ["W"], ["W"]),
        helper.make_node("Sigmoid", ["W"], ["W"]),
    ]
    graph = helper.make_graph(
        nodes,
        name="test_initializer",
        inputs=[
            helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 2)),
            helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2)),
            helper.make_tensor_value_info("weight", TensorProto.FLOAT, (2, 2)),
        ],
        outputs=[
            helper.make_tensor_value_info("W", TensorProto.FLOAT, (2, 2))
        ],
        initializer=[
            helper.make_tensor("weight", TensorProto.FLOAT, [2, 2],
                               identity.flatten().astype(float))
        ])

    def sigmoid(v):
      return 1 / (1 + np.exp(-v))

    # Reference result computed with NumPy on the same inputs.
    expected = sigmoid(np.tanh((x_val + y_val) * identity))
    backend_rep = prepare(helper.make_model(graph))
    result = backend_rep.run({"X": x_val, "Y": y_val})
    np.testing.assert_almost_equal(result["W"], expected)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
  unittest.main()
|
# coding=utf-8
# Copyright 2021
import tensorflow as tf
import numpy as np
import pickle
import re
import os.path
import argparse
import ConfMapper as cm
from pathlib import Path
from datetime import datetime
# Config
# Create a parser instance as the interface for command-line arguments.
parser = argparse.ArgumentParser(
    description="default configuration: "
                "batch_size = 60\n"
                "c_length = 19\n"
                "num_train = 11470\n"
                "embedding_size = 128\n"
                "num_attention_heads= 4\n"
                "base_num=1\n"
                "nblock = 3\n"
                "epoches = int(num_train / batch_size)\n"
                "num_training_steps = epoches * 500\n"
                "use_GPU: default is None, which means tensorflow has access to all the device,"
                "use_GPU parameter assigns values to environment variables CUDA_VISIBLE_DEVICES, "
                "to give permission to specified GPUs.")
parser.add_argument('-b', '--batch_size', type=int, default=60)
parser.add_argument('--nblock', type=int, default=3)
parser.add_argument('--embeddingsize', type=int, default=128)
parser.add_argument('--nheads', type=int, default=8)
parser.add_argument('-in', '--iter_num', type=int, default=5)
parser.add_argument('-nn', '--name_num', default='01')
parser.add_argument('-bn', '--base_num', type=int, default=1)
parser.add_argument('-en', '--epoch_num', type=int, default=1200)
parser.add_argument('-use_GPU', type=str, default=None)
args = parser.parse_args()

# Optionally restrict TensorFlow to the requested GPUs.
use_GPU = args.use_GPU
if use_GPU is not None:
    os.environ["CUDA_VISIBLE_DEVICES"] = use_GPU

# Hyper-parameters taken from the command line.
batch_size = args.batch_size
nblock = args.nblock
embedding_size = args.embeddingsize
num_attention_heads = args.nheads
iter_num = args.iter_num
name_num = args.name_num
base_num = args.base_num
epoc = args.epoch_num

# Fixed problem constants.
c_length = 19  # length of one HP sequence (19-mer)
num_train = 11470  # number of training samples
epoches = int(num_train / batch_size)
num_training_steps = epoches * epoc
iter_width = 12

# Destination file for the pickled accuracy history.
directory0 = "data/"
directory1 = 'data_HPSCC/'
accu_file_name = 'HPSCC_epoch%d_basenum%d_name%s.txt' % (epoc, base_num, name_num)
data_name = directory0 + directory1 + accu_file_name
# func0
def create_initializer(initializer_range=0.02):
    """Return a truncated-normal initializer with stddev ``initializer_range``."""
    init = tf.truncated_normal_initializer(stddev=initializer_range)
    return init
# func1
def flatten_tensor_to_2d(tensor):
    """Reshape ``tensor`` to rank 2 by collapsing all leading dimensions.

    Tensors that are already rank <= 2 are returned unchanged; otherwise
    the result has shape ``[-1, last_dim]``.
    """
    # BUG FIX: TensorShape exposes the rank as `.ndims` (TF 1.x); the
    # original `tensor.shape.dim` raised AttributeError on every call.
    if tensor.shape.ndims <= 2:
        return tensor
    else:
        width = tensor.shape[-1]
        output = tf.reshape(tensor, [-1, width])
        return output
# func2
def get_shape_list(tensor):
    """Return the shape of ``tensor`` as a list, resolving dynamic dims.

    Statically known dimensions are returned as Python ints; dimensions
    that are ``None`` at graph-construction time (e.g. a placeholder batch
    dimension) are replaced by the matching scalar from ``tf.shape``.
    """
    static_shape = tensor.shape.as_list()
    # Indices whose size cannot be determined statically.
    dynamic_indices = [i for i, size in enumerate(static_shape) if size is None]
    if not dynamic_indices:
        return static_shape
    runtime_shape = tf.shape(tensor)
    for i in dynamic_indices:
        static_shape[i] = runtime_shape[i]
    return static_shape
# func3
def layer_norm(input_tensor, name=None):
    """Run layer normalization over the last dimension of ``input_tensor``."""
    normalized = tf.contrib.layers.layer_norm(
        inputs=input_tensor,
        begin_norm_axis=-1,
        begin_params_axis=-1,
        scope=name,
    )
    return normalized
# func4
def gelu(x):
    """Gaussian Error Linear Unit (tanh approximation).

    A smoother version of ReLU; see https://arxiv.org/abs/1606.08415.

    Args:
      x: float Tensor to perform activation.

    Returns:
      ``x`` with the GELU activation applied elementwise.
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf
# layer 0 * 2
def HP_layer_lookup(input_seqs,
                    n_hidden=iter_width,
                    n_neuron=50,
                    initializer_range=0.02):
    """Look up per-residue weights/biases for the iterated HP layer.

    Each sequence position (value 0 or 1, i.e. P or H) selects its own row
    of four embedding tables, producing position-specific parameters for
    the two-layer network applied by ``HP_layer``.

    Args:
        input_seqs: int32 tensor [batch, c_length] with values in {0, 1}.
        n_hidden: width of the iterated state per position.
        n_neuron: hidden width of the per-position two-layer network.
        initializer_range: stddev of the truncated-normal initializer.

    Returns:
        Tuple (WHP1, bHP1, WHP2, bHP2) of per-position weight/bias tensors.
    """
    # Validate the input rank (dtype/shape check).
    if input_seqs.shape.ndims != 2:
        raise ValueError("input_seqs has unmatched ndims: %d" % input_seqs.shape.ndims)
    # input_seqs is `x`: an int32 [-1, 19] tensor made up of 0/1 values.
    with tf.variable_scope("HP_layer", reuse=tf.AUTO_REUSE):
        W_emb1 = tf.get_variable("W_emb1",
                                 [2, n_hidden * n_neuron],
                                 dtype=tf.float32,
                                 initializer=create_initializer(initializer_range))
        W_emb2 = tf.get_variable("W_emb2",
                                 [2, n_hidden * n_neuron],
                                 dtype=tf.float32,
                                 initializer=create_initializer(initializer_range))
        B_emb1 = tf.get_variable("B_emb1",
                                 [2, n_neuron],
                                 initializer=tf.constant_initializer(0.1))
        B_emb2 = tf.get_variable("B_emb2",
                                 [2, n_hidden],
                                 initializer=tf.constant_initializer(0.1))
        # Gather one parameter set per position and restore the 4-D layout.
        WHP1 = tf.reshape(tf.nn.embedding_lookup(W_emb1, input_seqs), [-1, c_length, n_hidden, n_neuron])
        bHP1 = tf.reshape(tf.nn.embedding_lookup(B_emb1, input_seqs), [-1, c_length, n_neuron])
        WHP2 = tf.reshape(tf.nn.embedding_lookup(W_emb2, input_seqs), [-1, c_length, n_neuron, n_hidden])
        bHP2 = tf.reshape(tf.nn.embedding_lookup(B_emb2, input_seqs), [-1, c_length, n_hidden])
    return (WHP1, bHP1,WHP2, bHP2)
def HP_layer(iter_xx, WHP1, bHP1, WHP2, bHP2,
             n_hidden=iter_width,
             n_neuron=50):
    """Apply the position-specific two-layer GELU network to the state.

    The per-position weight/bias tensors come from ``HP_layer_lookup``.
    """
    with tf.variable_scope("HP_layer", reuse=tf.AUTO_REUSE):
        state = tf.reshape(iter_xx, [-1, c_length, n_hidden, 1])
        hidden = gelu(tf.reduce_sum(state * WHP1, axis=-2) + bHP1)
        hidden = tf.reshape(hidden, [-1, c_length, n_neuron, 1])
        output = gelu(tf.reduce_sum(hidden * WHP2, axis=-2) + bHP2)
        return output
# layer 1
def attention_layer(input_tensor,
                    num_attention_heads=num_attention_heads,
                    size_per_head=int(embedding_size / num_attention_heads),
                    attention_keep_probs=0.9,
                    initializer_range=0.02,
                    act=None,
                    name="attention_layer"):
    """Multi-head self-attention over a rank-3 input.

    Args:
        input_tensor: float tensor [batch, id_nums, hidden].
        num_attention_heads: number of attention heads.
        size_per_head: projection width per head.
        attention_keep_probs: keep probability for attention dropout.
        initializer_range: stddev for dense-kernel initializers.
        act: optional activation for the Q/K/V projections.
        name: variable scope name.

    Returns:
        Context tensor [batch, id_nums, num_attention_heads * size_per_head].
    """
    # NOTE: this closure reads `id_nums` from the enclosing function scope;
    # it is only assigned below (inside the variable scope), so the helper
    # must not be called before that assignment.
    def transpose_to_multiheads(tensor):
        # id_nums = tensor.shape[1]
        tensor = tf.reshape(tensor, [-1,id_nums, num_attention_heads, size_per_head])
        output_tensor = tf.transpose(tensor, [0, 2, 1, 3])
        return output_tensor
    if input_tensor.shape.ndims != 3:
        print(input_tensor.shape)
        raise ValueError("One batch of embedding 19mer should be rank 3")
    # if size_per_head * num_attention_heads != input_tensor.shape[-1]:
    #     raise ValueError("size_per_head * num_attention_heads == hidden_size")
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        shape = input_tensor.shape.as_list()
        id_nums = shape[1]
        query_layer = tf.layers.dense(input_tensor,
                                      num_attention_heads * size_per_head,
                                      activation=act,
                                      name="query",
                                      kernel_initializer=create_initializer(initializer_range),
                                      reuse=tf.AUTO_REUSE)
        key_layer = tf.layers.dense(input_tensor,
                                    num_attention_heads * size_per_head,
                                    activation=act,
                                    name="key",
                                    kernel_initializer=create_initializer(initializer_range),
                                    reuse=tf.AUTO_REUSE)
        value_layer = tf.layers.dense(input_tensor,
                                      num_attention_heads * size_per_head,
                                      activation=act,
                                      name="value",
                                      kernel_initializer=create_initializer(initializer_range),
                                      reuse=tf.AUTO_REUSE)
        q = transpose_to_multiheads(query_layer)  # [-1, num_heads, id_num, head_size]
        k = transpose_to_multiheads(key_layer)
        v = transpose_to_multiheads(value_layer)
        # Scaled dot-product attention: softmax(QK^T / sqrt(d)) V.
        attention_score = tf.matmul(q, tf.transpose(k, [0, 1, 3, 2]), name="attention_score")
        a_s_probs = tf.nn.softmax(tf.multiply(attention_score, 1.0 / tf.sqrt(float(size_per_head))),
                                  axis=-1,
                                  name="attention_score_probs")
        a_s_probs = tf.nn.dropout(a_s_probs, attention_keep_probs, name="attention_score_dropout")
        # Merge the heads back into a single hidden dimension.
        context_layer = tf.transpose(tf.matmul(a_s_probs, v), [0, 2, 1, 3])
        context_layer = tf.reshape(context_layer,
                                   [-1, id_nums, num_attention_heads * size_per_head],
                                   name="context_layer")
    return context_layer
# module 0
def encoder_block(input_tensor,
                  iter_width=iter_width,
                  dense_width=embedding_size,
                  initializer_range=0.02,
                  intermediate_act=gelu,
                  intermediate_size=768,
                  blockname="encoder_block",
                  dense_keep_prob=0.9,
                  attention_keep_probs=0.9):
    """One encoder block: self-attention plus two convolutional sub-blocks.

    Each sub-block ends in a dense projection, dropout, residual add and
    layer norm. ``dense_width``, ``intermediate_act`` and
    ``intermediate_size`` are currently unused but kept for interface
    compatibility with callers.

    Args:
        input_tensor: float tensor [batch, c_length, iter_width].
        iter_width: per-position state width.
        blockname: variable scope (one per stacked block).
        dense_keep_prob: keep probability for dense-layer dropout.
        attention_keep_probs: keep probability for attention dropout.

    Returns:
        Tensor [batch, c_length, iter_width].
    """
    with tf.variable_scope(blockname, reuse=tf.AUTO_REUSE):
        attention_output = attention_layer(input_tensor, attention_keep_probs=attention_keep_probs)
        dense1 = tf.layers.dense(attention_output,
                                 iter_width,
                                 kernel_initializer=create_initializer(initializer_range),
                                 activation=None,
                                 name="Dense1",
                                 reuse=tf.AUTO_REUSE)
        dropout_dense1 = tf.nn.dropout(dense1, keep_prob=dense_keep_prob)
        residual_norm_output1 = layer_norm(dropout_dense1 + input_tensor)
        # TODO: try mixing these in the intermediate layer instead.
        residual_norm_output1_ = tf.reshape(residual_norm_output1, [-1, c_length, iter_width, 1])
        W_conv1 = tf.get_variable(name="W_conv1",
                                  shape=[5, 5, 1, 64],
                                  dtype=tf.float32,
                                  initializer=create_initializer(initializer_range))
        W_conv2 = tf.get_variable(name="W_conv2",
                                  shape=[4, 3, 64, 64],
                                  dtype=tf.float32,
                                  initializer=create_initializer(initializer_range))
        b_conv1 = tf.get_variable(name="b_conv1",
                                  shape=[64],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.1))
        # BUG FIX: this variable was created with name="b_conv1"; under
        # AUTO_REUSE that silently reused the b_conv1 variable, so conv1 and
        # conv2 shared one bias. Give the second conv its own bias.
        b_conv2 = tf.get_variable(name="b_conv2",
                                  shape=[64],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.1))
        conv1 = tf.nn.relu(tf.nn.conv2d(residual_norm_output1_, W_conv1, strides=[1, 3, 1, 1],
                                        padding='SAME') + b_conv1)
        conv2 = tf.nn.relu(tf.nn.conv2d(conv1, W_conv2, strides=[1, 1, 1, 1],
                                        padding='VALID') + b_conv2)
        conv2 = tf.reshape(conv2, [-1, 4 * 64 * 10])
        dense2 = tf.layers.dense(conv2,
                                 iter_width * c_length,
                                 name="Dense2",
                                 kernel_initializer=create_initializer(initializer_range),
                                 reuse=tf.AUTO_REUSE)
        dense2_ = tf.reshape(dense2, [-1, c_length, iter_width])
        dropout_dense2 = tf.nn.dropout(dense2_, keep_prob=dense_keep_prob)
        residual_norm_output2 = layer_norm(dropout_dense2 + residual_norm_output1)
        residual_norm_output2_ = tf.reshape(residual_norm_output2, [-1, c_length, iter_width, 1])
        W_conv3 = tf.get_variable(name="W_conv3",
                                  shape=[5, 5, 1, 64],
                                  dtype=tf.float32,
                                  initializer=create_initializer(initializer_range))
        W_conv4 = tf.get_variable(name="W_conv4",
                                  shape=[3, 3, 64, 64],
                                  dtype=tf.float32,
                                  initializer=create_initializer(initializer_range))
        b_conv3 = tf.get_variable(name="b_conv3",
                                  shape=[64],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.1))
        b_conv4 = tf.get_variable(name="b_conv4",
                                  shape=[64],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.1))
        conv3 = tf.nn.relu(tf.nn.conv2d(residual_norm_output2_, W_conv3, strides=[1, 1, 1, 1],
                                        padding='VALID') + b_conv3)
        conv4 = tf.nn.relu(tf.nn.conv2d(conv3, W_conv4, strides=[1, 1, 1, 1],
                                        padding='VALID') + b_conv4)
        conv4 = tf.reshape(conv4, [-1, 13 * 64 * (iter_width - 6)])
        dense3 = tf.layers.dense(conv4,
                                 iter_width * c_length,
                                 name="Dense3",
                                 kernel_initializer=create_initializer(initializer_range),
                                 reuse=tf.AUTO_REUSE)
        dense3 = tf.reshape(dense3, [-1, c_length, iter_width])
        dropout_dense3 = tf.nn.dropout(dense3, keep_prob=dense_keep_prob)
        residual_norm_output3 = layer_norm(dropout_dense3 + residual_norm_output2)
        return residual_norm_output3
# model
def attentionCNN_model(input_tensor,
                       num_of_blocks=nblock,
                       dense_width=embedding_size,
                       iter_width=iter_width,
                       initializer_range=0.02,
                       intermediate_act=tf.nn.relu,
                       intermediate_size=768,
                       dense_keep_prob=0.9,
                       attention_keep_probs=0.9,
                       return_all_layer=False,
                       encoder_block_name="encoder_block"):
    """Stack ``num_of_blocks`` encoder blocks on top of ``input_tensor``.

    Returns either the output of every block (``return_all_layer=True``)
    or just the final block's output.
    """
    block_outputs = []
    current = input_tensor
    for block_idx in range(num_of_blocks):
        # Each block gets its own variable scope via its numbered name.
        current = encoder_block(current,
                                iter_width=iter_width,
                                dense_width=dense_width,
                                initializer_range=initializer_range,
                                intermediate_act=intermediate_act,
                                intermediate_size=intermediate_size,
                                dense_keep_prob=dense_keep_prob,
                                attention_keep_probs=attention_keep_probs,
                                blockname="%s%d"
                                          % (encoder_block_name, block_idx))
        block_outputs.append(current)
    return block_outputs if return_all_layer else block_outputs[-1]
def output_layer(input, base_num=base_num):
    """Map the final encoder state to per-position class probabilities."""
    with tf.name_scope("output_layer"):
        # Output grid shape is determined by the ConfMapper encoding base.
        out_rows = cm.get_outputshape(base_num)[0]
        out_cols = cm.get_outputshape(base_num)[1]
        flat = tf.reshape(input, [-1, c_length * iter_width])
        hidden = tf.layers.dense(flat,
                                 1024,
                                 kernel_initializer=create_initializer(),
                                 activation=tf.nn.relu,
                                 name="dense_output")
        logits = tf.layers.dense(hidden,
                                 out_rows * out_cols,
                                 kernel_initializer=create_initializer(),
                                 name="y_output")
        output_probs = tf.nn.softmax(tf.reshape(logits, [-1, out_rows, out_cols]),
                                     axis=-1, name="output_probs")
    return output_probs
# Training Model
def ANN_HPSCC_model(x, y_, iter_xx, base_num=base_num):
    """Build the iterative HPSCC model graph and its cross-entropy loss.

    Args:
        x: int32 placeholder [batch, c_length] of 0/1 HP sequences.
        y_: int32 placeholder of decimal-encoded labels (one-hot encoded here).
        iter_xx: float32 placeholder holding the initial iterated state.
        base_num: encoding base forwarded to ConfMapper.

    Returns:
        Tuple (y, cross_entropy): output probabilities and scalar loss.

    NOTE(review): reads the module-level `dropout_keep_probs` placeholder
    and `iter_num`; variables are shared across iterations via the
    AUTO_REUSE scopes inside HP_layer/attentionCNN_model.
    """
    iteration_xx = iter_xx
    WHP1, bHP1, WHP2, bHP2 = HP_layer_lookup(x)
    iteration_xx_output = None
    # Feed the softmax of each iteration's output back in as the next state.
    for i in range(iter_num):
        iter_output = HP_layer(iteration_xx, WHP1, bHP1, WHP2, bHP2)
        iteration_xx_output = attentionCNN_model(iter_output,
                                                 dense_keep_prob=dropout_keep_probs,
                                                 attention_keep_probs=dropout_keep_probs)
        iteration_xx = tf.nn.softmax(iteration_xx_output, axis=-1)
    y = output_layer(iteration_xx_output)
    y_ = tf.one_hot(y_, cm.get_outputshape(base_num)[1])
    with tf.name_scope('loss_optimization'):
        # Elementwise cross entropy; the 1e-8 guards against log(0).
        cross_entropy = tf.reduce_mean(tf.reduce_sum(-tf.log(y + 1e-8) * y_,
                                       reduction_indices=[-2, -1]), name="cross_entropy")
    return y, cross_entropy
# Evaluation Metrics
def accuracy_metrics(y_pred, y_label, base_num, num_seq=19):
    """Build the distribution of per-sequence correct-position counts.

    Args:
        y_pred: [batch, rows, cols] class probabilities from the model.
        y_label: [batch, rows] integer labels (decimal-encoded).
        base_num: encoding base used by ConfMapper to expand to ternary.
        num_seq: number of positions per sequence (19 for HP19).

    Returns:
        List of ``num_seq + 1`` scalar tensors; entry ``i`` is the fraction
        of sequences in the batch with exactly ``i`` correct positions.
    """
    with tf.name_scope('Accuracy_metrics'):
        y_pred = tf.cast(tf.argmax(y_pred, axis=-1), tf.int32)
        y_label = tf.cast(y_label, tf.int32)
        # Expand decimal-encoded predictions/labels back to per-position codes.
        y_pred = cm.convert_dectoter(y_pred, base_num)
        y_label = cm.convert_dectoter(y_label, base_num)
        accu_per_position = tf.equal(y_pred, y_label)  # shape: [Batch, 19]
        accu_per_seq = tf.reduce_sum(tf.cast(accu_per_position, tf.int32), axis=-1)  # shape: [Batch]
        accs_train_ones = tf.ones_like(accu_per_seq)
        # CLEANUP: the original also filled a duplicate, never-returned
        # `accuracy_distribution_test` list; it has been removed.
        accuracy_distribution = []
        for i in range(num_seq + 1):
            # Fraction of sequences with exactly i correctly predicted positions.
            accuracy_i = tf.equal(accu_per_seq, accs_train_ones * i)
            accuracy_distribution.append(tf.reduce_mean(tf.cast(accuracy_i, tf.float32)))
        return accuracy_distribution
# feed_dict
# Graph inputs: HP sequence, iterated state, labels, and dropout control.
with tf.name_scope('input_output_placeholder'):
    x = tf.placeholder(tf.int32, [None, c_length], 'input_seq_x')
    iter_xx = tf.placeholder(tf.float32, [None, c_length, iter_width], 'iter_xx')
    y_ = tf.placeholder(tf.int32, [None, cm.get_outputshape(base_num)[0]], 'output_label_y')
    dropout_keep_probs = tf.placeholder(tf.float32, [], 'dropout_keep_probs')
# Build the model, optimizer and evaluation metrics at module import time.
y, loss = ANN_HPSCC_model(x, y_, iter_xx)
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
accuracy_distribution = accuracy_metrics(y, y_, base_num=base_num)
def main_func(input_train, output_train, input_test, output_test, base_num=base_num):
    """Train the model and periodically evaluate on train/test subsets.

    Args:
        input_train: int32 array [num_train, c_length] of 0/1 HP sequences.
        output_train: decimal-encoded training labels (see ConfMapper).
        input_test: int32 array [n_test, c_length] of test sequences.
        output_test: decimal-encoded test labels.
        base_num: encoding base forwarded to ConfMapper conversions.
    """
    n_eval_train = 1000  # training samples used for the periodic accuracy check
    # GENERALIZED: the test-set size was hard-coded to 2000; derive it from
    # the data so other test sets work too.
    n_test = len(input_test)
    with tf.Session() as sess:
        # Session initialization
        sess.run(tf.global_variables_initializer())
        idx = np.arange(num_train)
        accu_test_list = []
        # Uniform initial iterated state for every position.
        iter_xx_init = np.ones((batch_size, c_length, iter_width), np.float32) * (1.0 / iter_width)
        for i in range(num_training_steps):
            # Training Process: one step on a random mini-batch.
            np.random.shuffle(idx)
            train_dict = {x: input_train[idx[0:batch_size]],
                          y_: cm.convert_tertodec(output_train[idx[0:batch_size]], base_num),
                          iter_xx: iter_xx_init,
                          dropout_keep_probs: 0.9}
            sess.run(train_step, feed_dict=train_dict)
            # Evaluation Metrics
            if i % 100 == 0:
                # Train accuracy on n_eval_train training samples.
                accu_train_dict = {x: input_train[idx[0:n_eval_train]],
                                   y_: cm.convert_tertodec(output_train[idx[0:n_eval_train]], base_num),
                                   iter_xx: np.ones((n_eval_train, c_length, iter_width), np.float32) * (1.0 / iter_width),
                                   dropout_keep_probs: 1.0}
                accuracy_list_train, loss_eval = sess.run((accuracy_distribution, loss),
                                                          feed_dict=accu_train_dict)
                print("Step %d, Train Accuracy Distribution:\n" % i, accuracy_list_train)
                print("Train Cross Entropy:", loss_eval)
                # Test accuracy on the whole test set.
                accu_test_dict = {x: input_test,
                                  y_: cm.convert_tertodec(output_test, base_num),
                                  iter_xx: np.ones((n_test, c_length, iter_width), np.float32) * (1.0 / iter_width),
                                  dropout_keep_probs: 1.0}
                accuracy_list_test = sess.run(accuracy_distribution, feed_dict=accu_test_dict)
                accu_test_list.append(accuracy_list_test[-1])
                # Persist the accuracy history after every evaluation.
                with open(data_name, 'wb') as data:
                    pickle.dump(accu_test_list, data)
                print("Step %d, Test Accuracy Distribution:\n" % i, accuracy_list_test)
                if len(accu_test_list) > 50:
                    print("*" * 50, "Test set latest accuracies:", sep="\n")
                    print(accu_test_list[-50:-1], end="\n" + "*" * 50 + '\n')
                else:
                    print("*" * 50, "Test set latest accuracies:", sep="\n")
                    print(accu_test_list, end="\n" + "*" * 50 + '\n')
if __name__ == "__main__":
    # Data Pipeline: load the pickled train/test sets of HP 19-mers.
    with open('dataset/HP19trainset11470.txt', 'rb') as f:
        trainset = pickle.load(f)
    input_train = np.array(trainset['input'], dtype=np.int32)  # [1,0,0,1,1,0,0,0....]
    output_train = np.array(trainset['output'], dtype=np.int32) + 1  # [1,0,-1] |--> [0,1,2]
    with open('dataset/HP19testset2000.txt', 'rb') as f:
        testset = pickle.load(f)
    input_test = np.array(testset['input'], dtype=np.int32)  # [1,0,0,1,1,0,0,0....]
    output_test = np.array(testset['output'], dtype=np.int32) + 1  # [1,0,-1] |--> [0,1,2]
    main_func(input_train, output_train, input_test, output_test, base_num=base_num)
|
from math import e
import warnings
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from ..optimize import Optimizer
from .optimal_scaling_problem import OptimalScalingProblem
from .parameter import InnerParameter
from .problem import InnerProblem
from .solver import InnerSolver
# Inner-solver formulation: 'reduced' optimizes one bound per category,
# 'standard' optimizes a lower and an upper bound per category
# (see get_inner_options / get_bounds_for_category).
REDUCED = 'reduced'
STANDARD = 'standard'
# Interval-constraint modes used by compute_interval_constraints.
MAXMIN = 'max-min'
MAX = 'max'
class OptimalScalingInnerSolver(InnerSolver):
    """
    Solve the inner subproblem of the
    optimal scaling approach for ordinal data.
    """
    def __init__(self,
                 optimizer: Optimizer = None,
                 options: Dict = None):
        # Optional optimizer handle; not used directly by solve().
        self.optimizer = optimizer
        self.options = options
        if self.options is None:
            self.options = OptimalScalingInnerSolver.get_default_options()
        # Reparameterization is only derived for the reduced formulation.
        if self.options['method'] == STANDARD \
                and self.options['reparameterized']:
            raise NotImplementedError(
                'Combining standard approach with '
                'reparameterization not implemented.'
            )
        self.x_guesses = None
    def solve(
            self,
            problem: InnerProblem,
            sim: List[np.ndarray],
            sigma: List[np.ndarray],
            scaled: bool,
    ) -> list:
        """
        Get results for every group (inner optimization problem).

        Parameters
        ----------
        problem:
            InnerProblem from pyPESTO hierarchical
        sim:
            Simulations from AMICI
        sigma:
            List of sigmas (not needed for this approach)
        scaled:
            ...

        Returns
        -------
        One result dict (with at least 'success' and 'fun') per group.
        """
        optimal_surrogates = []
        #print("EVO SIM:", sim)
        for gr in problem.get_groups_for_xs(InnerParameter.OPTIMALSCALING):
            xs = problem.get_xs_for_group(gr)
            if (gr in problem.hard_constraints.group.values):
                # Hard-constrained groups skip the inner optimization: the
                # surrogate objective is evaluated directly and wrapped in a
                # result dict of the same shape as scipy's.
                #print(gr, "Tu sam")
                hard_constraints = problem.get_hard_constraints_for_group(gr)
                #print(hard_constraints)
                obj = calculate_obj_fun_for_hard_constraints(xs, sim, self.options, hard_constraints)
                #fake optimization results, explain more ZEBO
                surrogate_opt_results_from_hard_constraints = {'success' : True, 'fun' : obj}
                optimal_surrogates.append(surrogate_opt_results_from_hard_constraints)
                continue
            #print("Running for group: ", gr)
            surrogate_opt_results = optimize_surrogate_data(xs, sim, self.options)
            optimal_surrogates.append(surrogate_opt_results)
        return optimal_surrogates
    @staticmethod
    def calculate_obj_function(x_inner_opt: list):
        """
        Calculate the inner objective function from a list of inner
        optimization results returned from compute_optimal_surrogate_data.

        Parameters
        ----------
        x_inner_opt:
            List of optimization results

        Returns
        -------
        Sum of the per-group objective values, or NaN if any group failed.
        """
        if False in [x_inner_opt[idx]['success'] for idx in range(len(x_inner_opt))]:
            obj = np.nan
            warnings.warn(f"Inner optimization failed.")
        else:
            obj = np.sum(
                [x_inner_opt[idx]['fun'] for idx in range(len(x_inner_opt))]
            )
        # print(obj)
        #print("I calculated the obj function with optimized inner pars")
        return obj
    def calculate_gradients(self,
                            problem: OptimalScalingProblem,
                            x_inner_opt,
                            sim,
                            sy,
                            parameter_mapping,
                            par_opt_ids,
                            amici_model,
                            snllh,
                            ):
        """Fill ``snllh`` with the objective gradient w.r.t. outer parameters.

        For each non-scaling simulation parameter, the gradient accumulates
        per-group contributions; hard-constrained groups only get the direct
        theta-derivative term, while optimized groups also get the implicit
        term through dxi/dtheta.

        NOTE(review): assumes `sy` is indexable as [condition][:, par_idx, :]
        and that simulation parameters appear in mapping order — confirm
        against the AMICI caller.
        """
        #breakpoint()
        condition_map_sim_var = parameter_mapping[0].map_sim_var
        #print(condition_map_sim_var)
        par_sim_ids = list(amici_model.getParameterIds())
        par_sim_idx=-1
        #print(par_sim_ids)
        # TODO: Doesn't work with condition specific parameters
        for par_sim, par_opt in condition_map_sim_var.items():
            if not isinstance(par_opt, str):
                continue
            # Scaling parameters are inner parameters; skip them here.
            if par_opt.startswith('optimalScaling_'):
                continue
            #par_sim_idx = par_sim_ids.index(par_sim) ZEBO REPLACE
            par_sim_idx += 1
            par_opt_idx = par_opt_ids.index(par_opt)
            grad = 0.0
            #print(par_sim, par_opt)
            for idx, gr in enumerate(problem.get_groups_for_xs(InnerParameter.OPTIMALSCALING)):
                if (gr in problem.hard_constraints.group.values): #group of hard constraint measurements
                    hard_constraints = problem.get_hard_constraints_for_group(gr)
                    xi = get_xi_for_hard_constraints(gr, problem, hard_constraints, sim, self.options)
                    sim_all = get_sim_all(problem.get_xs_for_group(gr), sim)
                    sy_all = get_sy_all(problem.get_xs_for_group(gr), sy, par_sim_idx)
                    #print(sim_all)
                    #print(sy_all)
                    problem.groups[gr]['W'] = problem.get_w(gr, sim_all)
                    problem.groups[gr]['Wdot'] = problem.get_wdot(gr, sim_all, sy_all)
                    # Residual: surrogate minus simulation, zero-padded to
                    # the full inner-parameter length.
                    res = np.block([xi[:problem.groups[gr]['num_datapoints']] - sim_all,
                                    np.zeros(problem.groups[gr]['num_inner_params'] - problem.groups[gr]['num_datapoints'])])
                    #print(res)
                    dy_dtheta = get_dy_dtheta(gr, problem, sy_all)
                    df_dtheta = res.dot(res.dot(problem.groups[gr]['Wdot']) - 2*problem.groups[gr]['W'].dot(dy_dtheta)) # -2 * problem.W.dot(dy_dtheta).dot(res)
                    grad += df_dtheta
                    continue
                xi = get_xi(gr, problem, x_inner_opt[idx], sim, self.options)
                sim_all = get_sim_all(problem.get_xs_for_group(gr), sim)
                sy_all = get_sy_all(problem.get_xs_for_group(gr), sy, par_sim_idx)
                #print("sim_all for group ", gr, ": \n", sim_all)
                #breakpoint()
                #print(sy_all)
                problem.groups[gr]['W'] = problem.get_w(gr, sim_all)
                problem.groups[gr]['Wdot'] = problem.get_wdot(gr, sim_all, sy_all)
                res = np.block([xi[:problem.groups[gr]['num_datapoints']] - sim_all,
                                np.zeros(problem.groups[gr]['num_inner_params'] - problem.groups[gr]['num_datapoints'])])
                #print(res)
                df_dxi = 2 * problem.groups[gr]['W'].dot(res)
                dy_dtheta = get_dy_dtheta(gr, problem, sy_all)
                dd_dtheta = problem.get_dd_dtheta(gr, problem.get_xs_for_group(gr), sim_all, sy_all)
                d = problem.get_d(gr, problem.get_xs_for_group(gr), sim_all, self.options['minGap'])
                mu = get_mu(gr, problem, xi, res, d)
                # Implicit derivative of the inner optimum w.r.t. theta.
                dxi_dtheta = calculate_dxi_dtheta(gr, problem, xi, mu, dy_dtheta, res, d, dd_dtheta)
                df_dtheta = res.dot(res.dot(problem.groups[gr]['Wdot']) - 2*problem.groups[gr]['W'].dot(dy_dtheta)) # -2 * problem.W.dot(dy_dtheta).dot(res)
                grad += dxi_dtheta.dot(df_dxi) + df_dtheta
            snllh[par_opt_idx] = grad
        #print("I calculated the grad with optimized inner pars")
        return snllh
    @staticmethod
    def get_default_options() -> Dict:
        """
        Return default options for solving the inner problem,
        if no options provided.
        """
        options = {'method': 'reduced',
                   'reparameterized': True,
                   'intervalConstraints': 'max',
                   'minGap': 1e-16}
        return options
def calculate_dxi_dtheta(gr,
                         problem: OptimalScalingProblem,
                         xi,
                         mu,
                         dy_dtheta,
                         res,
                         d,
                         dd_dtheta):
    """Solve the linear system for d(xi)/d(theta) of one group.

    Builds a block matrix from the weights W, constraint matrix C, the
    multipliers mu and the constraint values (C xi + d), and solves it
    sparsely. Appears to be the implicit-function system of the inner
    optimum's optimality conditions — TODO confirm against the derivation.
    """
    from scipy.sparse import csc_matrix, linalg
    # Left-hand side: [[2W, C^T], [diag-scaled C, diag(C xi + d)]].
    A = np.block([[2 * problem.groups[gr]['W'], problem.groups[gr]['C'].transpose()],
                  [(mu*problem.groups[gr]['C'].transpose()).transpose(), np.diag(problem.groups[gr]['C'].dot(xi) + d)]])
    A_sp = csc_matrix(A)
    # Right-hand side from the theta-derivatives of objective and constraints.
    b = np.block(
        [2 * dy_dtheta.dot(problem.groups[gr]['W']) - 2*problem.groups[gr]['Wdot'].dot(res), -mu*dd_dtheta])
    dxi_dtheta = linalg.spsolve(A_sp, b)
    # Keep only the inner-parameter rows; drop the multiplier block.
    return dxi_dtheta[:problem.groups[gr]['num_inner_params']]
def get_dy_dtheta(gr,
                  problem: OptimalScalingProblem,
                  sy_all):
    """Pad the sensitivities with zeros for the bound entries of the group.

    The inner-parameter vector has two bound entries per category after the
    datapoints, which do not depend on theta directly.
    """
    n_bound_entries = 2 * problem.groups[gr]['num_categories']
    return np.block([sy_all, np.zeros(n_bound_entries)])
def get_mu(gr,
           problem: OptimalScalingProblem,
           xi,
           res,
           d):
    """Estimate the Lagrange multipliers mu for one group.

    Solves C^T mu = -2 W res in the least-squares sense with the 'gelsy'
    LAPACK driver. The commented block below is an older variant that only
    solved for multipliers of active constraints.
    """
    from scipy import linalg
    '''
    mu = np.zeros(problem.groups[gr]['num_constr_full'])
    mu_zero_indices = np.array(problem.groups[gr]['C'].dot(xi) - d).nonzero()[0]
    mu_non_zero_indices = np.where(np.array(problem.groups[gr]['C'].dot(xi) - d) == 0)[0]
    A = problem.groups[gr]['C'].transpose()[:, mu_non_zero_indices]
    mu_non_zero = linalg.lstsq(A, -2*res.dot(problem.groups[gr]['W']))[0]
    mu[mu_non_zero_indices] = mu_non_zero
    '''
    mu = linalg.lstsq(problem.groups[gr]['C'].transpose(), -2*res.dot(problem.groups[gr]['W']), lapack_driver='gelsy')
    # lstsq returns (solution, residues, rank, ...); only the solution is needed.
    return mu[0]
def get_xi(gr,
           problem: OptimalScalingProblem,
           x_inner_opt: Dict,
           sim: List[np.ndarray],
           options: Dict):
    """Assemble the full inner-parameter vector xi for one group.

    Layout: surrogate datapoints first, then lower-bound and upper-bound
    entries at the index positions recorded in ``problem.groups[gr]``.
    """
    xs = problem.get_xs_for_group(gr)
    interval_range, interval_gap = \
        compute_interval_constraints(xs, sim, options)
    xi = np.zeros(problem.groups[gr]['num_inner_params'])
    # Surrogate values and category bounds from the inner optimizer result.
    surrogate_all, x_lower, x_upper = \
        get_surrogate_all(xs, x_inner_opt['x'], sim, interval_range, interval_gap, options)
    xi[:problem.groups[gr]['num_datapoints']] = surrogate_all.flatten()
    xi[problem.groups[gr]['lb_indices']] = x_lower
    xi[problem.groups[gr]['ub_indices']] = x_upper
    return xi
def optimize_surrogate_data(xs: List[InnerParameter],
                            sim: List[np.ndarray],
                            options: Dict):
    """Run optimization for the inner problem of one group.

    Minimizes the surrogate-data objective over the category bounds with
    scipy. If the initial point violates the bound constraints, retries
    once from the origin.

    Parameters
    ----------
    xs:
        Inner parameters (categories) of the group.
    sim:
        Simulations from AMICI.
    options:
        Inner-solver options (method, reparameterized, ...).

    Returns
    -------
    The scipy ``OptimizeResult`` of the surrogate optimization.
    """
    from scipy.optimize import minimize
    interval_range, interval_gap = \
        compute_interval_constraints(xs, sim, options)
    w = get_weight_for_surrogate(xs, sim)
    def obj_surr(x):
        return obj_surrogate_data(xs, x, sim, interval_gap,
                                  interval_range, w, options)
    inner_options = \
        get_inner_options(options, xs, sim, interval_range, interval_gap)
    try:
        results = minimize(obj_surr, **inner_options)
    except ValueError:
        # BUG FIX: a bare `except:` previously swallowed every exception
        # (including KeyboardInterrupt). Only scipy's infeasible-x0
        # ValueError is expected here.
        print('x0 violate bound constraints. Retrying with array of zeros.')
        inner_options['x0'] = np.zeros(len(inner_options['x0']))
        results = minimize(obj_surr, **inner_options)
    return results
def get_inner_options(options: Dict,
                      xs: List[InnerParameter],
                      sim: List[np.ndarray],
                      interval_range: float,
                      interval_gap: float) -> Dict:
    """Return keyword arguments for the scipy minimizer of the inner problem.

    The reduced method optimizes one bound per category (L-BFGS-B with box
    bounds, after optional reparameterization); the standard method
    optimizes two bounds per category (SLSQP with ordering constraints).
    """
    from scipy.optimize import Bounds
    min_all, max_all = get_min_max(xs, sim)
    # print("Evo max", max_all)
    if options['method'] == REDUCED:
        parameter_length = len(xs)
        # Spread the initial bounds from the feasible minimum upwards.
        x0 = np.linspace(
            np.max([min_all, interval_range]),
            max_all + (interval_range + interval_gap)*parameter_length,
            parameter_length
        )
        #print("Min", min_all, "i max", max_all)
        #print("Evo i x0", x0)
    elif options['method'] == STANDARD:
        parameter_length = 2 * len(xs)
        x0 = np.linspace(0, max_all + interval_range, parameter_length)
    else:
        raise NotImplementedError(
            f"Unkown optimal scaling method {options['method']}. "
            f"Please use {STANDARD} or {REDUCED}."
        )
    if options['reparameterized']:
        # Offsets replace ordered bounds, so simple box bounds suffice.
        x0 = y2xi(x0, xs, interval_gap, interval_range)
        bounds = Bounds([0.0] * parameter_length, [max_all + (interval_range + interval_gap)*parameter_length] * parameter_length)
        inner_options = {'x0': x0, 'method': 'L-BFGS-B',
                         'options': {'maxiter': 2000, 'ftol': 1e-10},
                         'bounds': bounds}
    else:
        constraints = get_constraints_for_optimization(xs, sim, options)
        inner_options = {'x0': x0, 'method': 'SLSQP',
                         'options': {'maxiter': 2000, 'ftol': 1e-10, 'disp': True},
                         'constraints': constraints}
    return inner_options
def get_min_max(xs: List[InnerParameter],
                sim: List[np.ndarray]) -> Tuple[float, float]:
    """Return the minimal and maximal simulation value linked to ``xs``."""
    values = get_sim_all(xs, sim)
    return np.min(values), np.max(values)
def get_sy_all(xs, sy, par_idx):
    """Collect sensitivities w.r.t. parameter ``par_idx`` for all ``xs``.

    For every inner parameter, the sensitivity rows selected by its
    condition masks are flattened into one array, in order.
    """
    collected = []
    for inner_par in xs:
        for sens, mask in zip(sy, inner_par.ixs):
            # Slice out the derivative w.r.t. the requested parameter,
            # then keep only the masked timepoints.
            collected.extend(sens[:, par_idx, :][mask])
    return np.array(collected)
def get_sim_all(xs, sim: List[np.ndarray]) -> list:
    """Return a flat list of every simulation value linked to ``xs``."""
    values = []
    for inner_par in xs:
        for sim_i, mask in zip(sim, inner_par.ixs):
            # Append each masked simulation value individually.
            values.extend(sim_i[mask])
    return values
def get_surrogate_all(xs,
                      optimal_scaling_bounds,
                      sim,
                      interval_range,
                      interval_gap,
                      options):
    """Compute surrogate data and category bounds for all ``xs``.

    Each simulation value is clipped into its category interval
    [x_lower, x_upper]; values already inside are kept unchanged.

    Returns:
        Tuple of arrays (surrogate_all, x_lower_all, x_upper_all), one
        entry per masked simulation value.
    """
    if options['reparameterized']:
        # Recover the actual bounds from the reparameterized offsets.
        optimal_scaling_bounds = \
            xi2y(optimal_scaling_bounds, xs, interval_gap, interval_range)
    surrogate_all = []
    x_lower_all = []
    x_upper_all = []
    for x in xs:
        x_upper, x_lower = \
            get_bounds_for_category(
                x, optimal_scaling_bounds, interval_gap, options
            )
        #print("Upper:", x_upper, "\n lower:", x_lower)
        for sim_i, mask_i in \
                zip(sim, x.ixs):
            #if mask_i.any():
            y_sim = sim_i[mask_i]
            for y_sim_i in y_sim:
                # Clip the simulation value into the category interval.
                if x_lower > y_sim_i:
                    y_surrogate = x_lower
                elif y_sim_i > x_upper:
                    y_surrogate = x_upper
                elif x_lower <= y_sim_i <= x_upper:
                    y_surrogate = y_sim_i
                else:
                    continue
                surrogate_all.append(y_surrogate)
                x_lower_all.append(x_lower)
                x_upper_all.append(x_upper)
    return np.array(surrogate_all), np.array(x_lower_all), np.array(x_upper_all)
def get_weight_for_surrogate(xs: List[InnerParameter],
                             sim: List[np.ndarray]) -> float:
    """Return the objective normalization weight: sum of |sim values| + eps."""
    eps = 1e-8
    all_values = get_sim_all(xs, sim)
    return np.sum(np.abs(all_values)) + eps  # TODO: w ** 2
def compute_interval_constraints(xs: List[InnerParameter],
                                 sim: List[np.ndarray],
                                 options: Dict) -> Tuple[float, float]:
    """Compute the minimal category interval range and gap.

    The constraints on interval size and gap size are chosen similarly to
    Pargett et al. (2014), scaled by the number of categories.
    """
    eps = options.get('minGap', 1e-16)
    min_simulation, max_simulation = get_min_max(xs, sim)
    n_categories = len(xs)
    mode = options['intervalConstraints']
    if mode == MAXMIN:
        span = max_simulation - min_simulation
        interval_range = span / (2 * n_categories + 1)
        interval_gap = span / (4 * (n_categories - 1) + 1)
    elif mode == MAX:
        interval_range = max_simulation / (2 * n_categories + 1)
        interval_gap = max_simulation / (4 * (n_categories - 1) + 1)
    else:
        raise ValueError(
            f"intervalConstraints = "
            f"{options['intervalConstraints']} not implemented. "
            f"Please use {MAX} or {MAXMIN}."
        )
    # The minimal gap eps keeps categories strictly separated.
    return interval_range, interval_gap + eps
def y2xi(optimal_scaling_bounds: np.ndarray,
         xs: List[InnerParameter],
         interval_gap: float,
         interval_range: float) -> np.ndarray:
    """Reparameterize optimal scaling bounds into non-negative offsets.

    Each entry becomes the slack of a category's bound over the minimum
    allowed by the previous category plus the range/gap constraints.
    """
    reparameterized = np.full(shape=np.shape(optimal_scaling_bounds),
                              fill_value=np.nan)
    for x in xs:
        cat = int(x.category)
        if cat == 1:
            # First category: only the minimal interval range applies.
            reparameterized[0] = optimal_scaling_bounds[0] - interval_range
        else:
            reparameterized[cat - 1] = (optimal_scaling_bounds[cat - 1]
                                        - optimal_scaling_bounds[cat - 2]
                                        - interval_gap - interval_range)
    return reparameterized
def xi2y(
        optimal_scaling_bounds_reparameterized: np.ndarray,
        xs: List[InnerParameter],
        interval_gap: float,
        interval_range: float) -> np.ndarray:
    """Invert ``y2xi``: recover the original optimal scaling bounds.

    TODO: optimal scaling parameters in the parameter sheet have to be
    ordered at the moment, since each category builds on the previous one.
    """
    bounds = np.full(shape=np.shape(optimal_scaling_bounds_reparameterized),
                     fill_value=np.nan)
    for x in xs:
        cat = int(x.category)
        if cat == 1:
            bounds[0] = interval_range + optimal_scaling_bounds_reparameterized[0]
        else:
            # Stack each bound on top of the previous category's bound.
            bounds[cat - 1] = (optimal_scaling_bounds_reparameterized[cat - 1]
                               + interval_gap + interval_range
                               + bounds[cat - 2])
    return bounds
def obj_surrogate_data(xs: List[InnerParameter],
                       optimal_scaling_bounds: np.ndarray,
                       sim: List[np.ndarray],
                       interval_gap: float,
                       interval_range: float,
                       w: float,
                       options: Dict) -> float:
    """Compute the optimal scaling objective function.

    For each category, simulated values selected by the category's data
    masks are projected onto the category interval [x_lower, x_upper]
    (the "surrogate data"); the objective is the sum of squared distances
    between simulation and surrogate, divided by the weight ``w``.

    :param xs: inner parameters, one per category
    :param optimal_scaling_bounds: category bound parameters; interpreted
        in the reparameterized space when ``options['reparameterized']``
    :param sim: model simulations, one array per condition
    :param interval_gap: minimal gap between consecutive categories
    :param interval_range: minimal width of a category interval
    :param w: normalization weight for the objective
    :param options: optimal scaling options dict
    """
    obj = 0.0
    if options['reparameterized']:
        # Bounds were optimized in the reparameterized space; map back first.
        optimal_scaling_bounds = \
            xi2y(optimal_scaling_bounds, xs, interval_gap, interval_range)
    for x in xs:
        x_upper, x_lower = \
            get_bounds_for_category(
                x, optimal_scaling_bounds, interval_gap, options
            )
        for sim_i, mask_i in \
                zip(sim, x.ixs):
            #if mask_i.any():
            y_sim = sim_i[mask_i]
            for y_sim_i in y_sim:
                # Clip the simulated value into the category interval;
                # values already inside contribute zero.
                if x_lower > y_sim_i:
                    y_surrogate = x_lower
                elif y_sim_i > x_upper:
                    y_surrogate = x_upper
                elif x_lower <= y_sim_i <= x_upper:
                    y_surrogate = y_sim_i
                else:
                    # Only reachable for non-comparable values (e.g. NaN).
                    continue
                obj += (y_surrogate - y_sim_i) ** 2
    obj = np.divide(obj, w)
    # print("Evo objective:", obj)
    return obj
def get_bounds_for_category(x: InnerParameter,
                            optimal_scaling_bounds: np.ndarray,
                            interval_gap: float,
                            options: Dict) -> Tuple[float, float]:
    """Return the (upper, lower) interval bounds for category ``x``.

    With the reduced method only upper bounds are stored and the lower
    bound is derived from the previous category; with the standard method
    lower/upper pairs are stored per category.
    """
    cat = int(x.category)
    method = options['method']
    if method == REDUCED:
        x_upper = optimal_scaling_bounds[cat - 1]
        if cat == 1:
            x_lower = 0
        elif cat > 1:
            # Lower bound sits half a gap above the previous upper bound.
            x_lower = optimal_scaling_bounds[cat - 2] + 0.5 * interval_gap
        else:
            raise ValueError('Category value needs to be larger than 0.')
        return x_upper, x_lower
    if method == STANDARD:
        x_lower = optimal_scaling_bounds[2 * cat - 2]
        x_upper = optimal_scaling_bounds[2 * cat - 1]
        return x_upper, x_lower
    raise NotImplementedError(
        f"Unkown optimal scaling method {options['method']}. "
        f"Please use {REDUCED} or {STANDARD}."
    )
def get_constraints_for_optimization(xs: List[InnerParameter],
                                     sim: List[np.ndarray],
                                     options: Dict) -> Dict:
    """Return inequality constraints for the inner optimization.

    Builds ``a`` and ``b`` so that ``a @ x - b >= 0`` enforces the minimal
    interval ranges/gaps between consecutive category bound parameters.

    :param xs: inner parameters, one per category (ordered by category)
    :param sim: model simulations used to scale the interval constraints
    :param options: optimal scaling options; ``options['method']`` selects
        the parameterization (``REDUCED`` or ``STANDARD``)
    :return: constraint dict in ``scipy.optimize.minimize`` format
    :raises NotImplementedError: for an unknown ``options['method']``
    """
    num_categories = len(xs)
    interval_range, interval_gap = \
        compute_interval_constraints(xs, sim, options)
    if options['method'] == REDUCED:
        # One parameter (upper bound) per category:
        # x_1 >= interval_range and x_i - x_{i-1} >= range + gap.
        a = np.diag(-np.ones(num_categories), -1) \
            + np.diag(np.ones(num_categories + 1))
        a = a[:-1, :-1]
        b = np.empty((num_categories,))
        b[0] = interval_range
        b[1:] = interval_range + interval_gap
    elif options['method'] == STANDARD:
        # Two parameters (lower, upper) per category: alternate the
        # range/gap requirements between consecutive entries.
        a = np.diag(-np.ones(2 * num_categories), -1) \
            + np.diag(np.ones(2 * num_categories + 1))
        a = a[:-1, :-1]
        b = np.empty((2 * num_categories,))
        b[0] = 0
        b[1::2] = interval_range
        b[2::2] = interval_gap
    else:
        # Previously this fell through with `a`/`b` undefined and died with
        # an opaque NameError; fail loudly like get_bounds_for_category.
        raise NotImplementedError(
            f"Unkown optimal scaling method {options['method']}. "
            f"Please use {REDUCED} or {STANDARD}."
        )
    ineq_cons = {'type': 'ineq', 'fun': lambda x: a.dot(x) - b}
    return ineq_cons
def calculate_obj_fun_for_hard_constraints(xs: List[InnerParameter],
                                           sim: List[np.ndarray],
                                           options: Dict,
                                           hard_constraints: pd.DataFrame):
    """Compute the surrogate-data objective using hard constraint bounds.

    Same squared-distance objective as ``obj_surrogate_data``, but the
    per-category intervals come from the ``hard_constraints`` table
    instead of optimized bound parameters.

    :param xs: inner parameters, one per category
    :param sim: model simulations, one array per condition
    :param options: optimal scaling options dict
    :param hard_constraints: table with 'category' and 'measurement'
        columns encoding the fixed bounds
    """
    interval_range, interval_gap = \
        compute_interval_constraints(xs, sim, options)
    w = get_weight_for_surrogate(xs, sim)
    obj = 0.0
    parameter_length = len(xs)
    min_all, max_all = get_min_max(xs, sim)
    # Fallback upper bound for categories without an explicit '<' constraint.
    max_upper = max_all + (interval_range + interval_gap)*parameter_length
    for x in xs:
        x_upper, x_lower = \
            get_bounds_from_hard_constraints(
                x, hard_constraints, max_upper, interval_gap
            )
        for sim_i, mask_i in \
                zip(sim, x.ixs):
            #if mask_i.any():
            y_sim = sim_i[mask_i]
            for y_sim_i in y_sim:
                # Clip the simulated value into the category interval.
                if x_lower > y_sim_i:
                    y_surrogate = x_lower
                elif y_sim_i > x_upper:
                    y_surrogate = x_upper
                elif x_lower <= y_sim_i <= x_upper:
                    y_surrogate = y_sim_i
                else:
                    # Only reachable for non-comparable values (e.g. NaN).
                    continue
                obj += (y_surrogate - y_sim_i) ** 2
    obj = np.divide(obj, w)
    return obj
def get_bounds_from_hard_constraints(x: "InnerParameter",
                                     hard_constraints: pd.DataFrame,
                                     max_upper: float,
                                     interval_gap: float) -> Tuple[float, float]:
    """Return the (upper, lower) bounds for category ``x`` from hard constraints.

    The ``measurement`` column encodes constraints as strings such as
    ``"<5"``, ``">2"`` or ``">2,<5"``; whitespace is ignored.

    :param x: inner parameter whose ``category`` selects the constraint row
    :param hard_constraints: table with 'category' and 'measurement' columns
    :param max_upper: fallback upper bound when no '<' constraint is given
    :param interval_gap: currently unused; kept for signature compatibility
    :raises ValueError: when no lower bound can be determined for a
        category > 1 (previously crashed with an opaque UnboundLocalError)
    """
    x_category = int(x.category)
    constraint = hard_constraints[hard_constraints['category'] == x_category]
    # None sentinels instead of -1, so a literal constraint value of -1
    # cannot be mistaken for "no constraint given".
    lower_constraint = None
    upper_constraint = None
    measurement = constraint['measurement'].values[0]
    measurement = measurement.replace(" ", "")
    if '<' in measurement and '>' in measurement:
        # Combined form, e.g. ">2,<5": lower bound first, upper bound second.
        lower_constraint = float(measurement.split(',')[0][1:])
        upper_constraint = float(measurement.split(',')[1][1:])
    elif '<' in measurement:
        upper_constraint = float(measurement[1:])
    elif '>' in measurement:
        lower_constraint = float(measurement[1:])
    x_upper = max_upper if upper_constraint is None else upper_constraint
    if lower_constraint is not None:
        # Small offset keeps the interval strictly above the constraint.
        x_lower = lower_constraint + 1e-6
    elif x_category == 1:
        x_lower = 0
    else:
        raise ValueError(
            f"No lower bound could be determined for category {x_category}: "
            f"measurement {measurement!r} has no '>' constraint."
        )
    return x_upper, x_lower
def get_xi_for_hard_constraints(gr,
                                problem: OptimalScalingProblem,
                                hard_constraints: pd.DataFrame,
                                sim: List[np.ndarray],
                                options: Dict):
    """Assemble the full inner-parameter vector for a group from hard constraints.

    Computes per-category bounds from ``hard_constraints`` and per-datapoint
    surrogate values (simulations clipped into their category interval), then
    packs surrogates, lower bounds and upper bounds into one vector laid out
    according to the group's index bookkeeping.

    :param gr: group identifier within ``problem``
    :param problem: inner problem holding group structure and index maps
    :param hard_constraints: table with 'category' and 'measurement' columns
    :param sim: model simulations, one array per condition
    :param options: optimal scaling options dict
    """
    xs = problem.get_xs_for_group(gr)
    interval_range, interval_gap = \
        compute_interval_constraints(xs, sim, options)
    parameter_length = len(xs)
    min_all, max_all = get_min_max(xs, sim)
    # Fallback upper bound for categories without an explicit '<' constraint.
    max_upper = max_all + (interval_range + interval_gap)*parameter_length
    xi = np.zeros(problem.groups[gr]['num_inner_params'])
    surrogate_all = []
    x_lower_all = []
    x_upper_all = []
    for x in xs:
        x_upper, x_lower = \
            get_bounds_from_hard_constraints(
                x, hard_constraints, max_upper, interval_gap
            )
        for sim_i, mask_i in \
                zip(sim, x.ixs):
            #if mask_i.any():
            y_sim = sim_i[mask_i]
            for y_sim_i in y_sim:
                # Clip the simulated value into the category interval.
                if x_lower > y_sim_i:
                    y_surrogate = x_lower
                elif y_sim_i > x_upper:
                    y_surrogate = x_upper
                elif x_lower <= y_sim_i <= x_upper:
                    y_surrogate = y_sim_i
                else:
                    # NOTE(review): on `continue` nothing is appended, so
                    # surrogate_all can end up shorter than num_datapoints
                    # for non-comparable (NaN) values — confirm intended.
                    continue
                surrogate_all.append(y_surrogate)
        #print("GLE OVO ", x.category ,y_surrogate, x_lower, x_upper)
        x_lower_all.append(x_lower)
        x_upper_all.append(x_upper)
    xi[:problem.groups[gr]['num_datapoints']] = np.array(surrogate_all).flatten()
    xi[problem.groups[gr]['lb_indices']] = np.array(x_lower_all)
    xi[problem.groups[gr]['ub_indices']] = np.array(x_upper_all)
    return xi
|
# -*- coding: utf-8 -*-
"""Settings for when running under docker in development mode."""
from .dev import * # noqa
# Django database configuration for the dockerized dev environment.
DATABASES = {
    'default': {
        # Local SQLite file for Django's own tables.
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(ABS_PATH('./'), 'db.sqlite3'),
    },
    'backend': {
        # GIS-enabled PostgreSQL backend; all POSTGRES_* environment
        # variables must be set (KeyError at import time otherwise).
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': os.environ['POSTGRES_DB'],
        'USER': os.environ['POSTGRES_USER'],
        'PASSWORD': os.environ['POSTGRES_PASS'],
        'HOST': os.environ['POSTGRES_HOST'],
        'PORT': os.environ['POSTGRES_PORT'],
        'TEST_NAME': 'unittests',
    },
}
|
import logging
from datetime import timedelta, datetime
import traceback
import time
from django_cronium.models import CronJobLog
from django.utils import timezone
class Schedule(object):
    """Describes when a cron job should run.

    :param run_every_mins: run the job every this many minutes (or None)
    :param run_at_times: list of "HH:MM" strings at which to run (or None
        for no fixed times)
    :param retry_after_failure_mins: minutes to wait before retrying a
        failed run (or None to use the regular schedule)
    """
    def __init__(self, run_every_mins=None, run_at_times=None, retry_after_failure_mins=None):
        self.run_every_mins = run_every_mins
        # A mutable default argument ([]) would be shared by every Schedule
        # instance; use a None sentinel and create a fresh list per instance.
        self.run_at_times = run_at_times if run_at_times is not None else []
        self.retry_after_failure_mins = retry_after_failure_mins
class CronJobBase(object):
    """
    Base class for cron jobs executed via CronJobManager.

    Sub-classes should have the following properties:
    + code - This should be a code specific to the cron being run. Eg. 'general.stats' etc.
    + schedule - a Schedule instance describing when the job runs.
    Following functions:
    + do - This is the actual business logic to be run at the given schedule
    """
    pass
class CronJobManager(object):
    """
    A manager instance should be created per cron job to be run. Does all the logging tracking etc. for it.
    """
    @classmethod
    def __should_run_now(self, cron_job, force=False):
        """
        Returns a boolean determining whether this cron should run now or not!
        """
        # If we pass --force options, we force cron run
        # NOTE(review): this classmethod names its first argument `self`;
        # `self.user_time` therefore mutates CLASS-level state shared by
        # all jobs — confirm this is intended before running jobs concurrently.
        self.user_time = None
        if force:
            return True
        if cron_job.schedule.run_every_mins != None:
            # We check last job - success or not
            last_job = None
            try:
                last_job = CronJobLog.objects.filter(code=cron_job.code).latest('start_time')
            except CronJobLog.DoesNotExist:
                pass
            if last_job:
                # Failed last run: only retry once retry_after_failure_mins
                # have elapsed since it started.
                if not last_job.is_success and cron_job.schedule.retry_after_failure_mins:
                    if timezone.now() > last_job.start_time + timedelta(minutes=cron_job.schedule.retry_after_failure_mins):
                        return True
                    else:
                        return False
            previously_ran_successful_cron = None
            try:
                previously_ran_successful_cron = CronJobLog.objects.filter(code=cron_job.code, is_success=True, ran_at_time__isnull=True).latest('start_time')
            except CronJobLog.DoesNotExist:
                pass
            if previously_ran_successful_cron:
                # Enough time since the last successful interval-style run?
                if timezone.now() > previously_ran_successful_cron.start_time + timedelta(minutes=cron_job.schedule.run_every_mins):
                    return True
            else:
                # Never ran successfully before: run now.
                return True
        if cron_job.schedule.run_at_times:
            for time_data in cron_job.schedule.run_at_times:
                user_time = time.strptime(time_data, "%H:%M")
                actual_time = time.strptime("%s:%s" % (datetime.now().hour, datetime.now().minute), "%H:%M")
                if actual_time >= user_time:
                    # Run at the scheduled time only if it hasn't already
                    # run for that time slot today.
                    qset = CronJobLog.objects.filter(code=cron_job.code, start_time__gt=datetime.today().date(), ran_at_time=time_data)
                    if not qset:
                        self.user_time = time_data
                        return True
        return False
    @classmethod
    def run(self, cron_job, force=False, silent=False):
        """
        apply the logic of the schedule and call do() on the CronJobBase class
        """
        if not isinstance(cron_job, CronJobBase):
            raise Exception('The cron_job to be run should be a subclass of %s' % CronJobBase.__class__)
        if CronJobManager.__should_run_now(cron_job, force):
            logging.debug("Running cron: %s" % cron_job)
            cron_log = CronJobLog(code=cron_job.code, start_time=timezone.now())
            try:
                msg = cron_job.do()
                cron_log.is_success = True
                cron_log.message = msg or ''
            except Exception:
                # Any failure is recorded (truncated to the last 1000 chars)
                # rather than propagated, so one bad job can't kill the runner.
                error = traceback.format_exc()
                if not silent:
                    print(error)
                cron_log.is_success = False
                cron_log.message = error[-1000:]
            cron_log.ran_at_time = self.user_time if self.user_time else None
            cron_log.end_time = timezone.now()
            cron_log.save()
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import itertools
from paddlerec.core.utils import envs
from paddlerec.core.model import ModelBase
class Model(ModelBase):
    """FiBiNET-style CTR model (SENET + bilinear feature interactions)
    built on the PaddlePaddle fluid static-graph API."""
    def __init__(self, config):
        ModelBase.__init__(self, config)
    def _init_hyper_parameters(self):
        """Read hyper-parameters from the global env config."""
        self.is_distributed = True if envs.get_fleet_mode().upper(
        ) == "PSLIB" else False
        self.sparse_feature_number = envs.get_global_env(
            "hyper_parameters.sparse_feature_number")
        self.sparse_feature_dim = envs.get_global_env(
            "hyper_parameters.sparse_feature_dim")
        self.learning_rate = envs.get_global_env(
            "hyper_parameters.optimizer.learning_rate")
    def _SENETLayer(self, inputs, filed_size, reduction_ratio=3):
        """Squeeze-and-excitation over field embeddings.

        Squeezes each field embedding to a scalar (mean), passes it through
        a two-layer bottleneck (reduction_ratio controls the bottleneck
        width) and rescales the embeddings by the learned field weights.
        Returns the reweighted embeddings split back into per-field tensors.
        """
        reduction_size = max(1, filed_size // reduction_ratio)
        Z = fluid.layers.reduce_mean(inputs, dim=-1)
        A_1 = fluid.layers.fc(
            input=Z,
            size=reduction_size,
            param_attr=fluid.initializer.Xavier(uniform=False),
            act='relu',
            name='W_1')
        A_2 = fluid.layers.fc(
            input=A_1,
            size=filed_size,
            param_attr=fluid.initializer.Xavier(uniform=False),
            act='relu',
            name='W_2')
        V = fluid.layers.elementwise_mul(
            inputs, y=fluid.layers.unsqueeze(
                input=A_2, axes=[2]))
        return fluid.layers.split(V, num_or_sections=filed_size, dim=1)
    def _BilinearInteraction(self,
                             inputs,
                             filed_size,
                             embedding_size,
                             bilinear_type="interaction"):
        """Pairwise bilinear interaction between field embeddings.

        NOTE(review): only bilinear_type == "all" is implemented; any other
        value (including the default "interaction") raises
        NotImplementedError — confirm the configured
        hyper_parameters.bilinear_type is "all".
        """
        if bilinear_type == "all":
            p = [
                fluid.layers.elementwise_mul(
                    fluid.layers.fc(
                        input=v_i,
                        size=embedding_size,
                        param_attr=fluid.initializer.Xavier(uniform=False),
                        act=None,
                        name=None),
                    fluid.layers.squeeze(
                        input=v_j, axes=[1]))
                for v_i, v_j in itertools.combinations(inputs, 2)
            ]
        else:
            raise NotImplementedError
        return fluid.layers.concat(input=p, axis=1)
    def _DNNLayer(self, inputs, dropout_rate=0.5):
        """Three hidden ReLU layers (400 units each) with dropout."""
        deep_input = inputs
        for i, hidden_unit in enumerate([400, 400, 400]):
            fc_out = fluid.layers.fc(
                input=deep_input,
                size=hidden_unit,
                param_attr=fluid.initializer.Xavier(uniform=False),
                act='relu',
                name='d_' + str(i))
            fc_out = fluid.layers.dropout(fc_out, dropout_prob=dropout_rate)
            deep_input = fc_out
        return deep_input
    def net(self, input, is_infer=False):
        """Build the forward graph, AUC metrics and (in training) the loss."""
        # First sparse slot is the label; the rest are feature slots.
        self.sparse_inputs = self._sparse_data_var[1:]
        self.dense_input = self._dense_data_var[0]
        self.label_input = self._sparse_data_var[0]
        emb = []
        for data in self.sparse_inputs:
            feat_emb = fluid.embedding(
                input=data,
                size=[self.sparse_feature_number, self.sparse_feature_dim],
                param_attr=fluid.ParamAttr(
                    name='dis_emb',
                    learning_rate=5,
                    initializer=fluid.initializer.Xavier(
                        fan_in=self.sparse_feature_dim,
                        fan_out=self.sparse_feature_dim)),
                is_sparse=True)
            emb.append(feat_emb)
        concat_emb = fluid.layers.concat(emb, axis=1)
        filed_size = len(self.sparse_inputs)
        bilinear_type = envs.get_global_env("hyper_parameters.bilinear_type")
        reduction_ratio = envs.get_global_env(
            "hyper_parameters.reduction_ratio")
        dropout_rate = envs.get_global_env("hyper_parameters.dropout_rate")
        # Two interaction branches: SENET-reweighted and raw embeddings.
        senet_output = self._SENETLayer(concat_emb, filed_size,
                                        reduction_ratio)
        senet_bilinear_out = self._BilinearInteraction(
            senet_output, filed_size, self.sparse_feature_dim, bilinear_type)
        concat_emb = fluid.layers.split(
            concat_emb, num_or_sections=filed_size, dim=1)
        bilinear_out = self._BilinearInteraction(
            concat_emb, filed_size, self.sparse_feature_dim, bilinear_type)
        dnn_input = fluid.layers.concat(
            input=[senet_bilinear_out, bilinear_out, self.dense_input], axis=1)
        dnn_output = self._DNNLayer(dnn_input, dropout_rate)
        y_pred = fluid.layers.fc(
            input=dnn_output,
            size=1,
            param_attr=fluid.initializer.Xavier(uniform=False),
            act='sigmoid',
            name='logit')
        self.predict = y_pred
        auc, batch_auc, _ = fluid.layers.auc(input=self.predict,
                                             label=self.label_input,
                                             num_thresholds=2**12,
                                             slide_steps=20)
        if is_infer:
            self._infer_results["AUC"] = auc
            self._infer_results["BATCH_AUC"] = batch_auc
            return
        self._metrics["AUC"] = auc
        self._metrics["BATCH_AUC"] = batch_auc
        cost = fluid.layers.log_loss(
            input=self.predict,
            label=fluid.layers.cast(
                x=self.label_input, dtype='float32'))
        avg_cost = fluid.layers.reduce_mean(cost)
        self._cost = avg_cost
|
#!/usr/bin/env python
import torch
import tqdm
import sys
from fairseq.models.bart import BARTModel
# Batched BART inference: read source lines, decode with beam search,
# write one hypothesis per line to the target file.
if len(sys.argv) < 5:
    print("Usage: python bart_infer.py ckp_path bin_path source_file target_file")
    sys.exit(0)
ckp_path = sys.argv[1]
bin_path = sys.argv[2]
source_file = sys.argv[3]
target_file = sys.argv[4]
bart = BARTModel.from_pretrained(
    ckp_path,
    checkpoint_file='checkpoint_best.pt',
    data_name_or_path=bin_path,
)
# Move to GPU, switch off dropout and use fp16 for faster decoding.
bart.cuda()
bart.eval()
bart.half()
count = 1
bsz = 2 # make it small (2) for atis, 32 for others
with open(source_file) as source, open(target_file, 'w') as fout:
    # Prime the batch with the first line, then stream the rest.
    sline = source.readline().strip()
    slines = [sline]
    for sline in tqdm.tqdm(source):
        if count % bsz == 0:
            # Flush a full batch through the model.
            with torch.no_grad():
                hypotheses_batch = bart.sample(slines, beam=4, lenpen=2.0, max_len_b=20, min_len=6, no_repeat_ngram_size=3)
            for hypothesis in hypotheses_batch:
                fout.write(hypothesis + '\n')
                fout.flush()
            slines = []
        slines.append(sline.strip())
        count += 1
    # leftover
    if len(slines) != 0:
        with torch.no_grad():
            hypotheses_batch = bart.sample(slines, beam=4, lenpen=2.0, max_len_b=20, min_len=6, no_repeat_ngram_size=3)
        for hypothesis in hypotheses_batch:
            fout.write(hypothesis + '\n')
            fout.flush()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import numpy as np
from phrun.cache import Cache
from phrun.runner import Runner
# Cache files are created relative to the current working directory.
Cache.set_root_dir('.')
def test_cache():
    """Round-trip an int through the cache, then remove the cache files."""
    cache = Cache.get_cache('test_common')
    cache.set('int', 100)
    assert cache.get('int') == 100
    cache.clean()
def test_runner():
    """Build a three-phase pipeline ((1,2) -> sum -> square), run it fully,
    then resume from phase index 1 (presumably served from the cache —
    confirm against phrun.runner semantics)."""
    r = Runner().use_cache('test_common')
    r.add_phase('src', lambda: (1, 2)) \
        .add_phase('add', lambda x: x[0] + x[1]) \
        .add_phase('pow', lambda x: x ** 2)
    out = r.run()
    print(out)
    out = r.run_from(1)
    print(out)
def main():
    """Entry point: run the runner smoke test (the cache test is disabled)."""
    # test_cache()
    test_runner()


if __name__ == '__main__':
    main()
|
import xml.etree.ElementTree as ET
from urllib import request, parse
from copy import copy
def soap_request(url, data):
    """POST a SOAP/XML payload to *url* and return the parsed response.

    :param url: endpoint URL
    :param data: XML request body as a str (sent UTF-8 encoded)
    :return: response parsed via ``xml_to_dict``, or None if the HTTP
        status is not 200
    """
    req = request.Request(url, data=data.encode('utf-8'),
                          headers={'content-type': 'text/xml;charset=utf-8'},
                          method='POST')
    # Bug fix: `rep.getcode() is 200` compared int *identity* (works only
    # by CPython's small-int caching accident); use equality. The context
    # manager also guarantees the response socket is closed.
    with request.urlopen(req) as rep:
        if rep.getcode() != 200:
            return None
        body = rep.read().decode('utf-8')
    return xml_to_dict(ET.fromstring(body))
def url_decode(url):
    """Return *url* with percent-escapes replaced by their characters."""
    decoded = parse.unquote(url)
    return decoded
def strip_tag_name(t):
    """Strip a leading ``{namespace}`` prefix from an ElementTree tag name."""
    _, brace, local = t.rpartition("}")
    return local if brace else t
def xml_to_dict(r, root=True):
    """Convert an ElementTree element into nested dicts/lists.

    Attributes become dict entries, element text is stored under
    ``'text'`` and child elements are collected into lists keyed by their
    local (namespace-stripped) tag name.

    :param r: an ``xml.etree.ElementTree.Element``
    :param root: when True, wrap the result in ``{root_tag: ...}``
    """
    def _local_name(tag):
        # Drop any '{namespace}' prefix from an ElementTree tag.
        idx = tag.rfind("}")
        return tag[idx + 1:] if idx != -1 else tag

    if root:
        return {_local_name(r.tag): xml_to_dict(r, False)}
    d = dict(r.attrib)
    if r.text:
        d['text'] = r.text
    for x in r.findall("./*"):
        key = _local_name(x.tag)
        # Bug fix: membership used to be tested against the *namespaced*
        # tag (x.tag) while entries were stored under the stripped name,
        # so for namespaced documents the list was reset on every sibling
        # and only the last child with a given tag survived.
        if key not in d:
            d[key] = []
        d[key].append(xml_to_dict(x, False))
    return d
|
# -*- coding: utf-8 -*-
from .permissions import SignedPermission # noqa
from .signing import sign_filter_permissions # noqa
from .views import SignedViewSetMixin # noqa
|
import numpy as np
import cv2
import sys
# Grab a single frame from the default camera and save it under ./faces/
# named after the first CLI argument.
# NOTE(review): neither len(sys.argv) nor the capture success flag `ret`
# is checked — a missing argument or camera failure crashes the script.
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
print (sys.argv[1])
cv2.imwrite("./faces/"+sys.argv[1]+".jpg", frame)
cap.release()
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Retrieves a player achievement, earned or unearned.
This is a player facing Lambda function and used in-game.
"""
import botocore
from gamekithelpers import handler_request, handler_response, ddb
import distutils.core
import os
# DynamoDB table handles, resolved once at Lambda cold start from the
# function's environment configuration.
ddb_player_table = ddb.get_table(os.environ['PLAYER_ACHIEVEMENTS_TABLE_NAME'])
ddb_game_table = ddb.get_table(os.environ['ACHIEVEMENTS_TABLE_NAME'])
def _get_player_achievement(player_id, achievement_id, use_consistent_read):
    """Fetch one player-achievement row from DynamoDB.

    :param player_id: calling player's id
    :param achievement_id: achievement key
    :param use_consistent_read: use a strongly consistent read
    :return: the stored item, or a default unearned record when the player
        has no row for this achievement yet
    :raises botocore.exceptions.ClientError: on DynamoDB failure (logged)
    """
    try:
        response = ddb_player_table.get_item(
            **ddb.get_item_request_param({'player_id': player_id, 'achievement_id': achievement_id},
                                         use_consistent_read))
        player_achievement = ddb.get_response_item(response)
    except botocore.exceptions.ClientError as err:
        print(f"Error retrieving achievement_id: {achievement_id} for player_id: {player_id}. Error: {err}")
        raise err
    if player_achievement is None:
        # No row yet: synthesize an unearned default so callers can merge it.
        player_achievement = {
            'current_value': 0,
            'earned': False,
            'earned_at': None
        }
    return player_achievement
def _get_achievement(player_id, achievement_id, use_consistent_read):
    """Fetch the game achievement and merge in the player's progress.

    Hidden achievements, and secret achievements the player has not yet
    earned, are reported as None (treated as not found by the caller).

    :return: merged achievement dict, or None when hidden/secret/unknown
    """
    response = ddb_game_table.get_item(**ddb.get_item_request_param({'achievement_id': achievement_id}, use_consistent_read))
    achievement = ddb.get_response_item(response)
    if achievement is not None and achievement['is_hidden']:
        return None
    if achievement is not None:
        # get player achievement
        player_achievement = _get_player_achievement(player_id,
                                                     achievement['achievement_id'],
                                                     use_consistent_read)
        if achievement['is_secret'] and not player_achievement['earned']:
            return None
        # merge results; the timestamp attributes will be from the player's achievement
        achievement.update(player_achievement)
        # Defaults guard against partial player records.
        achievement.setdefault('current_value', 0)
        achievement.setdefault('earned', False)
        achievement.setdefault('earned_at', None)
    return achievement
def _strtobool(value):
    """Parse a truth-value string ('y'/'yes'/'t'/'true'/'on'/'1' vs
    'n'/'no'/'f'/'false'/'off'/'0'), mirroring the removed
    ``distutils.util.strtobool``."""
    lowered = value.lower()
    if lowered in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if lowered in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError(f"invalid truth value {value!r}")


def lambda_handler(event, context):
    """
    This is the lambda function handler.

    Returns 401 when the caller has no player id, an invalid-request
    response when the achievement_id path parameter is missing, 404 when
    the achievement is hidden/secret/unknown, and 200 with the merged
    player+game achievement otherwise.
    """
    handler_request.log_event(event)
    # Get player_id from requestContext
    player_id = handler_request.get_player_id(event)
    if player_id is None:
        return handler_response.response_envelope(401)
    # Get achievement_id from path
    achievement_id = handler_request.get_path_param(event, 'achievement_id')
    if achievement_id is None:
        return handler_response.invalid_request()
    # This module only imports distutils.core, so distutils.util.strtobool
    # resolved through a transitive import by accident — and distutils is
    # removed entirely in Python 3.12. Parse the query flag locally instead.
    use_consistent_read = _strtobool(
        handler_request.get_query_string_param(event, 'use_consistent_read', 'false'))
    try:
        achievement = _get_achievement(player_id, achievement_id, use_consistent_read)
    except botocore.exceptions.ClientError as err:
        print(f"Error retrieving items. Error: {err}")
        raise err
    if achievement is None:
        return handler_response.response_envelope(404, None)
    return handler_response.response_envelope(200, None, achievement)
|
from tkinter import *
from PIL import ImageTk, Image
# Minimal Tk window showing one resized image and a quit button.
root = Tk() # Init app
root.title("Images") # Set title here
root.iconbitmap("Images/neon.ico") # Insert icon here
# Keeping the PhotoImage in a module-level name prevents it from being
# garbage collected while the label still displays it.
my_img = ImageTk.PhotoImage(Image.open("Images/Burger.jpg").resize((300,400))) # Insert image here
my_label = Label(image=my_img)
my_label.pack()
button_quit = Button(root, text="Exit Program", command=root.quit)
button_quit.pack()
root.mainloop() # Keep the app running
|
#!/usr/bin/env python
import wx
import images
# When True, the toolbar is created via wx.Frame.CreateToolBar; when False
# it is a sizer-managed child panel widget (see TestToolBar.__init__).
FRAMETB = True
# Style flags applied to the demo toolbar.
TBFLAGS = ( wx.TB_HORIZONTAL
            | wx.NO_BORDER
            | wx.TB_FLAT
            #| wx.TB_TEXT
            #| wx.TB_HORZ_LAYOUT
            )
#---------------------------------------------------------------------------
class TestSearchCtrl(wx.SearchCtrl):
    """Search control that remembers the last few successful searches in a
    drop-down menu and replays them when a menu item is chosen."""
    # Maximum number of remembered searches (also the menu-id range bound).
    maxSearches = 5
    def __init__(self, parent, id=-1, value="",
                 pos=wx.DefaultPosition, size=wx.DefaultSize, style=0,
                 doSearch=None):
        style |= wx.TE_PROCESS_ENTER
        wx.SearchCtrl.__init__(self, parent, id, value, pos, size, style)
        self.Bind(wx.EVT_TEXT_ENTER, self.OnTextEntered)
        self.Bind(wx.EVT_SEARCHCTRL_SEARCH_BTN, self.OnTextEntered)
        # Menu item ids 1..maxSearches map onto self.searches entries.
        self.Bind(wx.EVT_MENU_RANGE, self.OnMenuItem, id=1, id2=self.maxSearches)
        # Callback invoked with the query text; truthy return = remember it.
        self.doSearch = doSearch
        self.searches = []
    def OnTextEntered(self, evt):
        """Run the search; on success remember it (FIFO) and clear the box."""
        text = self.GetValue()
        if self.doSearch(text):
            self.searches.append(text)
            if len(self.searches) > self.maxSearches:
                del self.searches[0]
            self.SetMenu(self.MakeMenu())
            self.SetValue("")
    def OnMenuItem(self, evt):
        """Replay a remembered search picked from the drop-down menu."""
        text = self.searches[evt.GetId()-1]
        self.doSearch(text)
    def MakeMenu(self):
        """Build the 'Recent Searches' menu from the remembered queries."""
        menu = wx.Menu()
        item = menu.Append(-1, "Recent Searches")
        item.Enable(False)
        for idx, txt in enumerate(self.searches):
            menu.Append(1+idx, txt)
        return menu
class TestToolBar(wx.Frame):
    """Demo frame exercising wx.ToolBar: standard tools, a check tool, a
    combo box and a search control, plus a status-bar clearing timer."""
    def __init__(self, parent, log):
        wx.Frame.__init__(self, parent, -1, 'Test ToolBar', size=(600, 400))
        self.log = log
        # Timer used to clear the status bar after OnToolEnter messages.
        self.timer = None
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        client = wx.Panel(self)
        client.SetBackgroundColour(wx.WHITE)
        if FRAMETB:
            # Use the wxFrame internals to create the toolbar and
            # associate it all in one tidy method call. By using
            # CreateToolBar or SetToolBar the "client area" of the
            # frame will be adjusted to exclude the toolbar.
            tb = self.CreateToolBar( TBFLAGS )
            # Here's a 'simple' toolbar example, and how to bind it using SetToolBar()
            #tb = wx.ToolBarSimple(self, -1, wx.DefaultPosition, wx.DefaultSize,
            #               wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT)
            #self.SetToolBar(tb)
            # But we're doing it a different way here.
        else:
            # The toolbar can also be a child of another widget, and
            # be managed by a sizer, although there may be some
            # implications of doing this on some platforms.
            tb = wx.ToolBar(client, style=TBFLAGS)
            sizer = wx.BoxSizer(wx.VERTICAL)
            sizer.Add(tb, 0, wx.EXPAND)
            client.SetSizer(sizer)
        log.write("Default toolbar tool size: %s\n" % tb.GetToolBitmapSize())
        self.CreateStatusBar()
        tsize = (24,24)
        new_bmp =  wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_TOOLBAR, tsize)
        open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize)
        copy_bmp = wx.ArtProvider.GetBitmap(wx.ART_COPY, wx.ART_TOOLBAR, tsize)
        paste_bmp= wx.ArtProvider.GetBitmap(wx.ART_PASTE, wx.ART_TOOLBAR, tsize)
        tb.SetToolBitmapSize(tsize)
        #tb.AddTool(10, new_bmp, "New", "Long help for 'New'")
        tb.AddTool(10, "New", new_bmp, wx.NullBitmap, wx.ITEM_NORMAL, "New", "Long help for 'New'", None)
        self.Bind(wx.EVT_TOOL, self.OnToolClick, id=10)
        self.Bind(wx.EVT_TOOL_RCLICKED, self.OnToolRClick, id=10)
        #tb.AddTool(20, open_bmp, "Open", "Long help for 'Open'")
        tb.AddTool(20, "Open", open_bmp, wx.NullBitmap, wx.ITEM_NORMAL, "Open", "Long help for 'Open'", None)
        self.Bind(wx.EVT_TOOL, self.OnToolClick, id=20)
        self.Bind(wx.EVT_TOOL_RCLICKED, self.OnToolRClick, id=20)
        tb.AddSeparator()
        tb.AddTool(30, "Copy", copy_bmp, wx.NullBitmap, wx.ITEM_NORMAL, "Copy", "Long help for 'Copy'", None)
        self.Bind(wx.EVT_TOOL, self.OnToolClick, id=30)
        self.Bind(wx.EVT_TOOL_RCLICKED, self.OnToolRClick, id=30)
        tb.AddTool(40, "Paste", paste_bmp, wx.NullBitmap, wx.ITEM_NORMAL, "Paste", "Long help for 'Paste'", None)
        self.Bind(wx.EVT_TOOL, self.OnToolClick, id=40)
        self.Bind(wx.EVT_TOOL_RCLICKED, self.OnToolRClick, id=40)
        tb.AddSeparator()
        #tool = tb.AddCheckTool(50, images.Tog1.GetBitmap(), shortHelp="Toggle this")
        tool = tb.AddTool(50, "Checkable", images.Tog1.GetBitmap(),
                          shortHelp="Toggle this", kind=wx.ITEM_CHECK)
        self.Bind(wx.EVT_TOOL, self.OnToolClick, id=50)
        self.Bind(wx.EVT_TOOL_ENTER, self.OnToolEnter)
        self.Bind(wx.EVT_TOOL_RCLICKED, self.OnToolRClick) # Match all
        self.Bind(wx.EVT_TIMER, self.OnClearSB)
        tb.AddSeparator()
        cbID = wx.NewIdRef()
        tb.AddControl(
            wx.ComboBox(
                tb, cbID, "", choices=["", "This", "is a", "wx.ComboBox"],
                size=(150,-1), style=wx.CB_DROPDOWN
                ))
        self.Bind(wx.EVT_COMBOBOX, self.OnCombo, id=cbID)
        tb.AddStretchableSpace()
        search = TestSearchCtrl(tb, size=(150,-1), doSearch=self.DoSearch)
        tb.AddControl(search)
        # Final thing to do for a toolbar is call the Realize() method. This
        # causes it to render (more or less, that is).
        tb.Realize()
    def DoSearch(self, text):
        # called by TestSearchCtrl
        self.log.WriteText("DoSearch: %s\n" % text)
        # return true to tell the search ctrl to remember the text
        return True
    def OnToolClick(self, event):
        """Log the click and toggle the enabled state of tool 10 ('New')."""
        self.log.WriteText("tool %s clicked\n" % event.GetId())
        #tb = self.GetToolBar()
        tb = event.GetEventObject()
        tb.EnableTool(10, not tb.GetToolEnabled(10))
    def OnToolRClick(self, event):
        self.log.WriteText("tool %s right-clicked\n" % event.GetId())
    def OnCombo(self, event):
        self.log.WriteText("combobox item selected: %s\n" % event.GetString())
    def OnToolEnter(self, event):
        """Log mouse-over of a tool and (re)start the status-bar clear timer."""
        self.log.WriteText('OnToolEnter: %s, %s\n' % (event.GetId(), event.GetInt()))
        if self.timer is None:
            self.timer = wx.Timer(self)
        if self.timer.IsRunning():
            self.timer.Stop()
        self.timer.Start(2000)
        event.Skip()
    def OnClearSB(self, event):  # called for the timer event handler
        self.SetStatusText("")
        self.timer.Stop()
        self.timer = None
    def OnCloseWindow(self, event):
        # Stop the timer before destroying, so no events fire on a dead frame.
        if self.timer is not None:
            self.timer.Stop()
            self.timer = None
        self.Destroy()
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Launcher panel with a single button that opens the toolbar frame."""
    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        b = wx.Button(self, -1, "Show the ToolBar sample", (50,50))
        self.Bind(wx.EVT_BUTTON, self.OnButton, b)
    def OnButton(self, evt):
        win = TestToolBar(self, self.log)
        win.Show(True)
        # Keep a reference so the frame object isn't garbage collected.
        self.frame = win
#---------------------------------------------------------------------------
def runTest(frame, nb, log):
    """Demo-framework entry point: create the sample panel inside *nb*."""
    win = TestPanel(nb, log)
    return win
#---------------------------------------------------------------------------
# Overview text displayed by the wxPython demo framework for this sample.
# (Fixed typo: "propogated" -> "propagated".)
overview = """\
wx.ToolBar is a narrow strip of icons on one side of a frame (top, bottom, sides)
that acts much like a menu does, except it is always visible. Additionally, actual
wxWindows controls, such as wx.TextCtrl or wx.ComboBox, can be added to the toolbar
and used from within it.
Toolbar creation is a two-step process. First, the toolbar is defined using the
various Add* methods of wx.ToolBar. Once all is set up, then wx.Toolbar.Realize()
must be called to render it.
wx.Toolbar events are also propagated as Menu events; this is especially handy when
you have a menu bar that contains items that carry out the same function. For example,
it is not uncommon to have a little 'floppy' toolbar icon to 'save' the current file
(whatever it is) as well as a FILE/SAVE menu item that does the same thing. In this
case, both events can be captured and acted upon using the same event handler
with no ill effects.
If there are cases where a toolbar icon should *not* be associated with a menu item,
use a unique ID to trap it.
There are a number of ways to create a toolbar for a wx.Frame. wx.Frame.CreateToolBar()
does all the work except it adds no buttons at all unless you override the virtual method
OnCreateToolBar(). On the other hand, you can just subclass wx.ToolBar and then use
wx.Frame.SetToolBar() instead.
Note that wx.TB_DOCKABLE is only supported under GTK. An attempt to alleviate this
is provided in wx.lib.floatbar, but it is not formally supported.
"""
if __name__ == '__main__':
    # Launched standalone: hand off to the wxPython demo framework runner.
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import pytest
from basecls.layers import NORM_TYPES
from basecls.models.resnet import resnet18
from basecls.solver.optimizer import SGD
from basecls.solver.weight_decay import get_param_groups
@pytest.mark.parametrize("weight_decay", [0, 1e-4, [(1e-5, "bias"), (0, NORM_TYPES), 1e-4]])
def test_weight_decay(weight_decay):
    """Smoke test: get_param_groups accepts scalar and per-pattern
    (value, matcher) weight-decay specs and yields groups SGD can consume."""
    model = resnet18()
    params = get_param_groups(model, weight_decay)
    SGD(params, 0.1, momentum=0.9)
|
from logger import Logger
# Demo of the project Logger: presumably it buffers messages and
# print_messages() dumps the buffer — confirm against logger.Logger.
logger = Logger()
logger.log("hello")
logger.log("goodbye")
logger.print_messages()
logger.log("something else")
print("")
logger.print_messages()
|
#!/usr/bin/env python
"""
Copyright 2015 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <scott.wales@unimelb.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Fortran03Lexer import Fortran03Lexer
from Fortran03Parser import Fortran03Parser
from antlr4 import CommonTokenStream
def parse(stream):
    """
    Parse a stream using antlr

    Inputs:
        - stream: an antlr4.FileStream (to parse a file) or
          antlr4.InputStream (to parse a string)

    Outputs:
        - An Antlr parser object. Extract parse trees using functions with
          the names of grammar products, e.g.
          parse(InputStream('function foo(bar)')).functionStmt()
    """
    token_stream = CommonTokenStream(Fortran03Lexer(stream))
    return Fortran03Parser(token_stream)
|
""" LibMCS2018.py
Module assembling algorithms and data structures
Supplemental Material for the Lecture Notes "Networks - A brief Introduction
using a Paradigmatic Combinatorial Optimization Problem" at the international
summer school "Modern Computational Science 10 - Energy of the Future" held
in Oldenburg, September 3-14, 2018
Author: O. Melchert
Date: 2018-09-11
"""
from src.graphAdjacencyList import fetchWeightedGraph
from src.minWgtSpannTree import mstKruskal, mstGraphviz
# EOF: LibMCS2018.py
|
import pytest
from assertions import list_is
# Shared fixture: a list compared against itself in the final case below.
dummy_lst = [{"name": "Elmer"}, {"name": "Sam"}]
# Parametrization shared by both subset-direction tests:
# (lst, subset_lst, expected) — whether subset_lst is a subset of lst.
params = (
    ("lst", "subset_lst", "expected"),
    [
        ([], [], True),
        ([{}, {}], [{}, {}, {}], True),
        (
            [{"id": 1, "name": "Jon", "pets": []}, {"id": 2, "name": "Sam"}],
            [{"id": 1, "pets": []}],
            True,
        ),
        ([{"id": 1}], [{"id": 1, "name": "Jon"}, {"id": 2, "name": "Sam"}], False),
        ([{"id": 1}], [{"id": 1, "name": "Elmer"}], False),
        (
            [{"id": 1, "name": "Elmer"}, {"id": 2, "name": "Sam"}],
            [{"id": 1, "name": "Elmer"}, {"id": 2, "name": "Sam"}],
            True,
        ),
        (dummy_lst, dummy_lst, True),
    ],
)
@pytest.mark.parametrize(*params)
def test_list_is_subset_of(lst, subset_lst, expected):
    """Check subset_of() and the <= operator agree with `expected`."""
    if expected is True:
        assert list_is(subset_lst).subset_of(lst)
        assert list_is(subset_lst) <= lst
    else:
        assert not list_is(subset_lst).subset_of(lst)
        assert not list_is(subset_lst) <= lst
@pytest.mark.parametrize(*params)
def test_list_has_subset(lst, subset_lst, expected):
    """Check has_subset() and the >= operator (mirror of subset_of)."""
    if expected is True:
        assert list_is(lst).has_subset(subset_lst)
        assert list_is(lst) >= subset_lst
    else:
        assert not list_is(lst).has_subset(subset_lst)
        assert not list_is(lst) >= subset_lst
|
# Twitter/X API credential placeholders. Replace with values from your
# developer portal; never commit real secrets to version control.
api_key = 'Your API Key goes here'
api_key_secret = 'Your API Secret Key goes here'
access_token = 'Your Access Token goes here'
access_token_secret = 'Your Access Token Secret goes here'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.