content stringlengths 5 1.05M |
|---|
from server.util import ScriptManager
def _open_shop_for(client, shop_id):
    # Shared helper: every second-option NPC click below just opens a shop window.
    client.getShops().openShop(shop_id)


def npcClick2_522(c, npcId):
    """NPC 522, second click option: open shop 1."""
    _open_shop_for(c, 1)


def npcClick2_523(c, npcId):
    """NPC 523, second click option: open shop 1."""
    _open_shop_for(c, 1)


def npcClick2_546(c, npcId):
    """NPC 546, second click option: open shop 7."""
    _open_shop_for(c, 7)


def npcClick2_548(c, npcId):
    """NPC 548, second click option: open shop 8."""
    _open_shop_for(c, 8)


def npcClick2_537(c, npcId):
    """NPC 537, second click option: open shop 9."""
    _open_shop_for(c, 9)


def npcClick2_582(c, npcId):
    """NPC 582, second click option: open shop 48."""
    _open_shop_for(c, 48)
class Road:
    """A rectangular road surface, measured in metres."""

    _length: int  # length in metres
    _width: int   # width in metres

    def __init__(self, length: int, width: int):
        """
        :param length: length in metres
        :param width: width in metres
        """
        self._length = length
        self._width = width

    def calculate(self, height: int = 5, mass_m_2: int = 25) -> int:
        """Return the mass of asphalt, in whole tonnes, needed to cover the road.

        Fixes the original docstring, which documented a nonexistent ``hight``
        parameter (the real name is ``height``) and was written in Russian.

        :param height: thickness of the road surface in centimetres
        :param mass_m_2: mass in kg of one square metre of road surface 1 cm thick
        :return: mass in tonnes; any fractional part is discarded (floor division)
        """
        # length * width * kg-per-(m^2*cm) * thickness gives kilograms; //1000 -> tonnes.
        return self._length * self._width * mass_m_2 * height // 1000
if __name__ == '__main__':
    # Demo: asphalt needed for a road 5 km long and 20 m wide.
    demo_road = Road(5000, 20)
    print(f'Для изготовления покрытия дороги нужно {demo_road.calculate()} тонн.')
|
# -*- coding: utf-8 -*-
from settings import * # noqa
# HipChat notifications are disabled while the test suite runs.
HIPCHAT_ENABLED = False

# Tests run against an in-memory SQLite database: fast, and leaves no files behind.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:'
    },
}

# the django apps aren't required for the tests,
# only the package under test has to be installed.
INSTALLED_APPS = ('trello_webhooks',)

# Optionally switch to the django-nose test runner, and additionally enable
# coverage reporting when the coverage package is importable.  Both imports
# are best-effort: if either one is missing, the Django defaults apply
# (note TEST_RUNNER may already be set when the coverage import fails).
try:
    import django_nose # noqa
    TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
    print u"TEST_RUNNER set to use django_nose"
    import coverage # noqa
    print u"TEST_RUNNER config includes coverage"
    NOSE_ARGS = [
        '--with-coverage',
        '--cover-package=trello_webhooks',
        '--cover-html',
        '--cover-html-dir=coverage_reports'
    ]
except ImportError:
    pass
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import os
from pathlib import Path
from aws_cdk import (
aws_lambda as lambda_,
aws_apigateway as api_gw,
aws_efs as efs,
aws_ec2 as ec2,
core as cdk,
)
class ServerlessHuggingFaceStack(cdk.Stack):
    """CDK stack: one container-image Lambda per Python script in ./inference,
    all sharing an EFS-backed HuggingFace model cache, each exposed through an
    API Gateway REST API."""

    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # EFS needs to be setup in a VPC
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)
        # creates a file system in EFS to store cache models
        fs = efs.FileSystem(
            self, "FileSystem", vpc=vpc, removal_policy=cdk.RemovalPolicy.DESTROY
        )
        # Access point the Lambdas mount; fixed POSIX ids and 750 permissions
        # restrict who can touch the cached models.
        access_point = fs.add_access_point(
            "MLAccessPoint",
            create_acl=efs.Acl(owner_gid="1001", owner_uid="1001", permissions="750"),
            path="/export/models",
            posix_user=efs.PosixUser(gid="1001", uid="1001"),
        )
        # %%
        # iterates through the Python files in the docker directory
        docker_folder = os.path.dirname(os.path.realpath(__file__)) + "/inference"
        pathlist = Path(docker_folder).rglob("*.py")
        for path in pathlist:
            base = os.path.basename(path)
            # Construct id and handler name are derived from the script's stem.
            filename = os.path.splitext(base)[0]
            # Lambda Function from docker image
            function = lambda_.DockerImageFunction(
                self,
                filename,
                code=lambda_.DockerImageCode.from_image_asset(
                    docker_folder, cmd=[filename + ".handler"]
                ),
                memory_size=8096,  # NOTE(review): 8096 MB — if 8 GiB was intended, that is 8192
                timeout=cdk.Duration.seconds(600),
                vpc=vpc,
                # Mount the shared EFS cache so model downloads persist across
                # cold starts (TRANSFORMERS_CACHE points at the mount below).
                filesystem=lambda_.FileSystem.from_efs_access_point(
                    access_point, "/mnt/hf_models_cache"
                ),
                environment={"TRANSFORMERS_CACHE": "/mnt/hf_models_cache"},
            )
            # NOTE(review): the f-strings below contain no placeholders, so every
            # loop iteration would use the same construct id/name — this looks
            # like a lost `{filename}` substitution; confirm against the
            # original template before deploying.
            api = api_gw.RestApi(
                self,
                f"(unknown)-api",
                rest_api_name=f"(unknown) Service",
                description=f"This service serves (unknown).",
                default_cors_preflight_options=api_gw.CorsOptions(
                    allow_origins=api_gw.Cors.ALL_ORIGINS,
                    allow_methods=api_gw.Cors.ALL_METHODS,
                ),
            )
            # adds method for the function
            lambda_integration = api_gw.LambdaIntegration(function)
            api.root.add_method("ANY")
            api_resource = api.root.add_resource(filename)
            api_resource.add_method("GET", lambda_integration)
            api_resource.add_method("POST", lambda_integration)
# Entry point: instantiate the stack and synthesize the CloudFormation template.
app = cdk.App()
ServerlessHuggingFaceStack(app, "ServerlessHuggingFaceStack")
app.synth()
# %%
|
import logging
from SDM.nodes.MiddleWare import MiddleWare
from SDM.rules.SynDestPushingRule import SynDestPushingRule
from SDM.util import bytes_to_ipv4
class SynMiddleWare(MiddleWare):
    """MiddleWare that tracks per-rule TCP/SYN packet counters from switch
    flow statistics and derives the SYN share of new TCP traffic.

    Monitoring requests create SynDestPushingRule entries; each stats report
    is diffed against the previous snapshot to compute an interval rate.
    """

    def __init__(self, ovs_switch, controller_ip="127.0.0.1", switch_ip=None, controller_port=6633, switch_port=None,
                 protocols=None):
        super(SynMiddleWare, self).__init__(ovs_switch, controller_ip, switch_ip, controller_port, switch_port,
                                            protocols)
        # Baseline counters used for a rule before it has reported statistics.
        self.frontier_default_value = {'tcp_packets': 0, 'syn_packets': 0}
        self.logger = logging.getLogger(__name__)
        self.logger.info("Created SynMiddleWare")

    def handle_results(self, res, rule):
        """Parse flow-stat text ``res`` for ``rule`` and report the SYN rate.

        ``res`` is expected to hold two flow entries after a header line: one
        matching on tcp_flags (the SYN counter) and one plain TCP counter.
        """
        if not res:
            return
        lines = res.split('\r')
        syn_packets = 0
        tcp_packets = 0
        current_rate = 0
        # Only the two flow-entry lines after the header are of interest.
        for line in lines[1:3]:
            stat = {}
            # Tokenise the line and keep only key=value fields.
            res = line.replace('\r', ' ').replace('\n', ' ').replace(',', ' ').split(' ')
            res = [x for x in res if "=" in x]
            for r in res:
                stat[r.split('=')[0]] = r.split('=')[1]
            try:
                # A tcp_flags field marks this entry as the SYN-matching flow.
                # noinspection PyUnusedLocal
                f = stat['tcp_flags']
                syn_packets = int(stat['n_packets'])
            except KeyError:
                try:
                    # No tcp_flags: this is the plain TCP counter entry.
                    tcp_packets = int(stat['n_packets'])
                except KeyError:
                    # Stats requested before the switch populated the counters.
                    self.logger.info('Request sent faster than switch ramp up')
        # Diff against the previous snapshot to get deltas for this interval.
        prev_tcp_count = self.frontier_values[rule]['tcp_packets']
        prev_syn_count = self.frontier_values[rule]['syn_packets']
        self.frontier_values[rule] = {'tcp_packets': tcp_packets, 'syn_packets': syn_packets}
        try:
            # Percentage of new SYN packets among all new TCP traffic.
            current_rate = ((1.0 * syn_packets - prev_syn_count) / (
                (tcp_packets - prev_tcp_count) + (syn_packets - prev_syn_count))) * 100
        except ZeroDivisionError:
            # No new traffic in this interval; rate stays 0.
            pass
        self.logger.info('rule '
                         'syn-packets tcp-packets syn-rate ')
        self.logger.info('-------------------------------------------------------- '
                         '----------- ----------- ---------')
        self.logger.info('%56s %011d %011d %03.6f', rule, syn_packets - prev_syn_count, tcp_packets - prev_tcp_count,
                         current_rate)
        self.handle_rule_stat(rule, current_rate)

    def create_rule(self, ip_addr, mask):
        """Build the pushing rule that counts SYN traffic toward ip_addr/mask."""
        return SynDestPushingRule(self.ovs_switch, self.datapath, ip_addr, mask, 1, 2, None, self.protocol_str)

    def handle_raw_msg(self, data):
        """Inspect a raw message; if it matches the expected shape, start monitoring.

        NOTE(review): the byte checks (data[1] == 0x0e, data[24] == 0x01,
        len == 104) look like a filter for one specific controller message
        type — confirm against the protocol before changing them.
        """
        hex_data = ':'.join('{:02x}'.format(x) for x in data)
        if len(data) != 0:
            if hex_data.split(":")[1] == "0e" and hex_data.split(":")[24] == "01" and len(data) == 104:
                # Destination address and mask sit at fixed byte offsets.
                ip_addr = bytes_to_ipv4(str(data[67:71]))
                mask = bytes_to_ipv4(str(data[71:75]))
                self.logger.debug("ip_bytes %s | mask_bytes %s", ':'.join('{:02x}'.format(x) for x in data[67:71]),
                                  ':'.join('{:02x}'.format(x) for x in data[71:75]))
                self.logger.debug("Monitoring %s:%s", ip_addr, mask)
                self.monitor(ip_addr, mask)
            return data
        else:
            return None

    # noinspection PyUnusedLocal,PyMethodMayBeStatic
    def get_rule_threshold(self, rule):
        """Fixed threshold handed to the base class — presumably compared
        against the computed SYN rate; TODO confirm the units."""
        return 2
|
#!/usr/bin/env python3
import random
from operator import itemgetter
from matplotlib import pyplot as plt
import sys
class GeneticAlgorithm(object):
    """Drives the generic GA loop; problem specifics live in the `genetics` object."""

    def __init__(self, genetics):
        self.genetics = genetics

    def run(self):
        """Evolve populations until the problem's stop condition is satisfied.

        Classic scheme: initialise a random population, score it, and
        repeatedly select/crossover/mutate until ``check_stop`` says the best
        individual is good enough.  Returns the final population.
        """
        current = self.genetics.get_initial_population()
        while True:
            # Score every individual as a (fitness, individual) pair.
            scored = [(self.genetics.fitness(individual), individual) for individual in current]
            if self.genetics.check_stop(scored):
                self.genetics.plot_result()
                return current
            current = self.next_population(scored)

    def next_population(self, fits):
        """Build the next generation from the scored population ``fits``."""
        pair_source = self.genetics.parents(fits)
        target_size = len(fits)
        elite_count = self.genetics.save_best
        # Elitism: carry the top `save_best` individuals over unchanged.
        ranked = sorted(fits, key=itemgetter(0))
        offspring = [ranked[-rank][1] for rank in range(1, elite_count + 1)]
        while len(offspring) < target_size:
            pair = next(pair_source)
            if random.random() < self.genetics.get_probability_crossover():
                children = self.genetics.crossover(pair)
            else:
                children = pair
            # Gate: only a fraction of parent pairs contribute children at all;
            # each admitted child is then mutated with the same probability.
            if random.random() < self.genetics.get_probability_mutation():
                for child in children:
                    if random.random() < self.genetics.get_probability_mutation():
                        offspring.append(self.genetics.mutation(child))
                    else:
                        offspring.append(child)
        return offspring[:target_size]
class GuessText:
    """GA problem definition: evolve a list of character codes toward a target text."""

    def __init__(self, target_text, limit=1000, size=250, prob_crossover=0.9, prob_mutation=0.1, save_best=5):
        """
        :param target_text: text that the algorithm should guess
        :param limit: maximum number of generations
        :param size: size of the population
        :param prob_crossover: probability of crossover
        :param prob_mutation: probability of mutation
        :param save_best: how many top individuals survive unchanged (elitism)
        """
        self.target = self.text2chromo(target_text)
        self.counter = 0
        self.limit = limit
        self.size = size
        self.prob_crossover = prob_crossover
        self.prob_mutation = prob_mutation
        self.save_best = save_best
        # (generation, fitness) samples collected for the final plot.
        self.fitness_best = []
        self.fitness_worst = []
        self.fitness_average = []

    def get_probability_crossover(self):
        return self.prob_crossover

    def get_probability_mutation(self):
        return self.prob_mutation

    def get_initial_population(self):
        """Return `size` random chromosomes as the starting population."""
        return [self.random_chromo() for _ in range(self.size)]

    def fitness(self, chromo):
        """Negative total character distance to the target; a perfect match scores 0."""
        distance = 0
        for have, want in zip(chromo, self.target):
            distance += abs(have - want)
        return -distance

    def check_stop(self, fits_populations):
        """Record statistics, print progress periodically, and decide whether to stop."""
        self.counter += 1
        ranked = sorted(fits_populations)
        best_fitness, best_individual = ranked[-1]
        scores = [score for score, _ in fits_populations]
        best = max(scores)
        worst = min(scores)
        average = sum(scores) / len(scores)
        if self.counter % 1 == 0:  # sample every generation (kept for clarity)
            self.fitness_best.append((self.counter, best))
            self.fitness_worst.append((self.counter, worst))
            self.fitness_average.append((self.counter, average))
            if self.counter % 10 == 0:
                print("[G %3d] score=(%4d, %4d, %4d): %r" % (self.counter, best, average, worst, self.chromo2text(best_individual)))
        if best_fitness == 0:
            # Exact match found — report it and stop.
            print("[G %3d] score=(%4d, %4d, %4d): %r" % (self.counter, best, average, worst, self.chromo2text(best_individual)))
            return True
        return self.counter >= self.limit

    def parents(self, fits_populations):
        """Endless generator of tournament-selected parent pairs."""
        while True:
            first = self.tournament(fits_populations)
            second = self.tournament(fits_populations)
            yield (first, second)

    def tournament(self, fits_populations):
        """Pick two random individuals; return the fitter of the two."""
        fit_a, cand_a = self.select_random(fits_populations)
        fit_b, cand_b = self.select_random(fits_populations)
        return cand_a if fit_a > fit_b else cand_b

    def crossover(self, parents):
        """Two-point crossover: swap the middle slice between the two parents."""
        mother, father = parents
        cut_a = random.randint(1, len(self.target) - 2)
        cut_b = random.randint(1, len(self.target) - 2)
        if cut_a > cut_b:
            cut_a, cut_b = cut_b, cut_a
        return (mother[:cut_a] + father[cut_a:cut_b] + mother[cut_b:],
                father[:cut_a] + mother[cut_a:cut_b] + father[cut_b:])

    def mutation(self, chromosome):
        """Nudge one randomly chosen gene by a small random amount."""
        pos = random.randint(0, len(self.target) - 1)
        delta = random.randint(-5, 5)
        mutant = list(chromosome)
        mutant[pos] += delta
        return mutant

    def select_random(self, fits_populations):
        """Uniformly pick one (fitness, individual) pair."""
        pick = random.randint(0, len(fits_populations) - 1)
        return fits_populations[pick]

    def text2chromo(self, text):
        """Encode text as a list of character codes."""
        return [ord(ch) for ch in text]

    def chromo2text(self, chromo):
        """Decode codes back to text, clamping each value into [1, 255]."""
        return "".join(chr(max(1, min(ch, 255))) for ch in chromo)

    def random_chromo(self):
        """Random chromosome of the target's length with codes in [1, 255]."""
        return [random.randint(1, 255) for _ in range(len(self.target))]

    def plot_result(self):
        """Plot best/average/worst fitness per generation and show the figure."""
        generations = [gen for gen, _ in self.fitness_best]
        best_curve = [val for _, val in self.fitness_best]
        worst_curve = [val for _, val in self.fitness_worst]
        average_curve = [val for _, val in self.fitness_average]
        plt.plot(generations, best_curve, color='g', label='Best')
        plt.plot(generations, average_curve, linestyle='--', color='b', label='Average')
        plt.plot(generations, worst_curve, color='r', label='Worst')
        plt.xlabel("Population")
        plt.ylabel("distance from optimum")
        plt.title('Genetic Algorithm')
        plt.legend()
        plt.show()
if __name__ == "__main__":
    # Default GA parameters; the target text may be overridden on the CLI.
    target_text = "Hello world"
    limit = 1000
    size = 250
    prob_crossover = 0.9
    prob_mutation = 0.1
    save_best = 5
    if len(sys.argv) > 1:
        target_text = sys.argv[1]
    problem = GuessText(target_text, limit, size, prob_crossover, prob_mutation, save_best)
    GeneticAlgorithm(problem).run()
|
# Generated by Django 3.2.3 on 2021-05-29 16:10
import uuid
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Tenant model with
    branding fields and optional per-stage flow overrides (authentication,
    invalidation, recovery, unenrollment)."""

    initial = True

    dependencies = [
        ("authentik_flows", "0018_oob_flows"),
    ]

    operations = [
        migrations.CreateModel(
            name="Tenant",
            fields=[
                # Random UUID primary key, fixed at creation time.
                (
                    "tenant_uuid",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                (
                    "domain",
                    models.TextField(
                        help_text="Domain that activates this tenant. Can be a superset, i.e. `a.b` for `aa.b` and `ba.b`"
                    ),
                ),
                ("default", models.BooleanField(default=False)),
                ("branding_title", models.TextField(default="authentik")),
                (
                    "branding_logo",
                    models.TextField(default="/static/dist/assets/icons/icon_left_brand.svg"),
                ),
                # Optional flow overrides; deleting a flow nulls the reference
                # rather than cascading into the tenant row.
                (
                    "flow_authentication",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="tenant_authentication",
                        to="authentik_flows.flow",
                    ),
                ),
                (
                    "flow_invalidation",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="tenant_invalidation",
                        to="authentik_flows.flow",
                    ),
                ),
                (
                    "flow_recovery",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="tenant_recovery",
                        to="authentik_flows.flow",
                    ),
                ),
                (
                    "flow_unenrollment",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="tenant_unenrollment",
                        to="authentik_flows.flow",
                    ),
                ),
            ],
            options={
                "verbose_name": "Tenant",
                "verbose_name_plural": "Tenants",
            },
        ),
    ]
|
"""Read an angle (in degrees) and print its sine, cosine and tangent."""
import math

# Convert the angle to radians once, then evaluate each trigonometric ratio.
angle_degrees = float(input('Digite um ângulo: '))
angle_radians = math.radians(angle_degrees)
sen = math.sin(angle_radians)
cos = math.cos(angle_radians)
tan = math.tan(angle_radians)
print('O valor para Seno é {:.2f}\nO valor para Cosseno é {:.2f}\nO valor para Tangente é {:.2f}.'.format(sen, cos, tan))
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Ground truth values for `synthetic_item_response_theory`.
Automatically generated using the command:
```
python -m inference_gym.tools.get_ground_truth \
--target \
synthetic_item_response_theory \
--stan_samples \
50000
```
"""
import numpy as np
# Scalar posterior summaries for the mean student ability (per the module
# docstring, estimated from 50000 Stan samples — regenerate rather than edit).
IDENTITY_MEAN_STUDENT_ABILITY_MEAN = np.array([
    0.07407780940224962,
]).reshape(())
# Monte-Carlo standard error of the mean estimate above.
IDENTITY_MEAN_STUDENT_ABILITY_MEAN_STANDARD_ERROR = np.array([
    0.0008707657869347018,
]).reshape(())
# Posterior standard deviation of the mean student ability.
IDENTITY_MEAN_STUDENT_ABILITY_STANDARD_DEVIATION = np.array([
    0.11197835313284824,
]).reshape(())
IDENTITY_QUESTION_DIFFICULTY_MEAN = np.array([
-1.7386134475919999,
0.4970258902902354,
1.4344515468160002,
1.641429930172,
-1.001773881264,
0.2327278244741427,
1.1243957450820001,
-0.07559818087100739,
-1.4440410689700003,
1.5331072539439998,
1.0109075684440003,
1.0993926030260002,
1.0008861356779999,
-0.5607651194998506,
-1.839558199016,
0.6157032862357659,
0.28316118897229803,
-1.3930272628359999,
-0.1826951549828188,
-0.27245178856736113,
-1.3238985986440002,
-0.7889031974784,
-0.629952581408262,
0.90073743814,
-0.634451995103406,
1.07784211796,
0.39487005242700796,
0.7102433332581601,
-0.22943065911830432,
2.1791148153,
0.3098560530447122,
-0.4780290339603407,
0.4171185387587336,
-0.929333270144,
0.01294210999174574,
-0.16320760854581498,
-0.72957786402666,
-0.008774799387835337,
0.2759720422917563,
0.627329170600476,
-0.607592383396456,
-1.049087797634,
-0.236599294154692,
-0.024498208985064728,
1.291555356934,
-0.06889535398832192,
1.3722089347600002,
0.3805027608634775,
-0.0976881344257036,
0.965523356652,
1.183840026494,
-1.6039984834439998,
0.5329468077083179,
-0.5170399530101487,
-0.3217317937651375,
-0.8679235757419999,
0.7910547048319361,
0.5155459551574353,
0.932831712898,
-0.30386777959856576,
0.65220167045662,
0.23627015969942233,
-0.8982357515989999,
0.40516740677637825,
0.1100365360794964,
-1.2240389583979998,
-0.14391360383527788,
-0.931383929464,
-1.3094086595539998,
0.08232204964352068,
0.28010321076742106,
-0.21117574411016715,
-0.3036128893657573,
1.381981250282,
0.5698702942150142,
-1.1702289528979999,
-2.0633761805999997,
-1.258155670806,
-0.10903028372803954,
-0.5808749584725981,
1.6144416303100002,
0.3801686252274436,
1.229021142386,
0.7957122265339999,
0.8231759003185999,
0.862264107036,
-1.480945797932,
-0.05874234330316512,
-0.04207072342839674,
-1.4291671972400002,
-0.24515205664346684,
0.2174140920427668,
-2.30767872408,
-0.254459719645339,
-1.342952374268,
-0.39624365341949636,
-0.07064649124210468,
1.4426443779959999,
-0.7329086047286,
2.8889239935599997,
]).reshape((100,))
IDENTITY_QUESTION_DIFFICULTY_MEAN_STANDARD_ERROR = np.array([
0.0008302688548113903,
0.0008377517344524241,
0.0008321175849093665,
0.0008347728688133536,
0.0008281721337753032,
0.0008379325213730962,
0.0008292481395888176,
0.0008347895922602316,
0.0008279643036566825,
0.0008434549165918242,
0.0008318796317302861,
0.0008335214577500402,
0.0008298823723502089,
0.0008307300086792171,
0.0008312541076548566,
0.0008386895707374964,
0.0008364389939085418,
0.0008363893404864511,
0.0008317526678195214,
0.000833399635423499,
0.0008399245591492785,
0.0008244676504408693,
0.0008452481044063678,
0.0008357020080111191,
0.0008365818821648423,
0.000836739112200339,
0.0008325497987036042,
0.0008325827491677793,
0.0008304742207482917,
0.0008353939843338689,
0.0008327627836690308,
0.0008395559653867722,
0.00083471930030518,
0.0008306375242686843,
0.0008346349200822218,
0.0008379523345081949,
0.0008343677087542545,
0.0008263287958796913,
0.0008306656100065513,
0.0008329747087928184,
0.0008386372261955543,
0.000833040050935524,
0.0008357591099290488,
0.0008278722812990439,
0.0008317177959206316,
0.0008322555719500899,
0.0008284495734618935,
0.0008307460415443434,
0.00083431329264272,
0.0008318617754617958,
0.0008341511735774881,
0.0008318694755599152,
0.0008297461193562304,
0.0008319329352222202,
0.0008343048075938506,
0.0008372311587521407,
0.0008381539319232146,
0.0008308702096690273,
0.0008344629089750782,
0.0008289645967878066,
0.0008242539285040097,
0.000833993393978764,
0.0008364670310790112,
0.0008305226650771719,
0.0008356646160099807,
0.0008392421387432828,
0.0008264871417932329,
0.000827252487053729,
0.0008303942298105111,
0.0008304800196125167,
0.0008301383282270659,
0.0008359593595189047,
0.0008322258600597539,
0.0008270496330697541,
0.0008334836475599582,
0.0008352430204730789,
0.0008314542128219164,
0.0008365574367892898,
0.0008365269330817332,
0.000832120732131346,
0.0008425776087297702,
0.0008466629270993518,
0.0008318047534406957,
0.0008369411157594329,
0.000832539591661743,
0.000842619013391847,
0.0008350133163141549,
0.0008250079348652423,
0.0008290982368429107,
0.0008316588928771712,
0.0008267783479902109,
0.0008307608606418441,
0.0008412217881701981,
0.0008366204539802397,
0.0008335447678819271,
0.0008411102156922019,
0.000834263313320927,
0.0008255609425307198,
0.0008292887956098761,
0.0008342698890570286,
]).reshape((100,))
IDENTITY_QUESTION_DIFFICULTY_STANDARD_DEVIATION = np.array([
0.19017135448372158,
0.162819951082574,
0.17402545582900222,
0.1820817104371582,
0.17366555964694247,
0.1571395384782796,
0.16772167706384994,
0.16136687759851204,
0.18010916364899882,
0.17838892755633004,
0.16686427951436475,
0.16796625276407362,
0.16815499080699614,
0.16361926806791727,
0.19334303017815638,
0.16009115090484416,
0.15952084094147587,
0.1760441353594948,
0.15994730382234315,
0.1621617301282396,
0.17296681112317416,
0.16525565942598144,
0.1648193502075456,
0.1648551685980192,
0.16593273911684836,
0.1679321535199737,
0.15942540730027077,
0.16212928413872107,
0.16430309343396984,
0.19871247126425526,
0.16125748282508606,
0.16311682757398924,
0.16480311538358602,
0.16688833742969125,
0.1615286497952559,
0.16179329412597496,
0.16562234292717024,
0.15912745401827486,
0.16098194022840587,
0.16366689611446134,
0.16316840530746693,
0.1704103020358691,
0.16397021517071111,
0.1586622047109239,
0.1735483326629893,
0.160958350599367,
0.17763375253878508,
0.15998639694393943,
0.1633977627750487,
0.16459602822272784,
0.16669677298727512,
0.1843541999111087,
0.16320312725822309,
0.16322279440335835,
0.16124012910420343,
0.1666897685147499,
0.1609479273337171,
0.1616371666386907,
0.16616042903371395,
0.1619882063583902,
0.16423776637599816,
0.16195949947738947,
0.16718000871582586,
0.1630887022032364,
0.1599585331515819,
0.17402542003665328,
0.160134484479293,
0.16623035308753262,
0.17762185774875405,
0.16281794094912955,
0.16138382477254196,
0.16312302474809293,
0.16122549070334602,
0.16985285764238753,
0.1628811634633817,
0.17436778068388226,
0.19976911365410505,
0.17212764494973917,
0.15726283514804865,
0.1639711864113929,
0.18100393485540206,
0.16197457263789833,
0.1733668443106707,
0.16349179347804524,
0.16423000212969757,
0.16434776401329063,
0.18065316238254644,
0.16177087036034404,
0.1599154184055926,
0.17696293715489916,
0.16177582267446725,
0.16119554373748715,
0.20628526679004286,
0.16105394199241257,
0.1745881166840509,
0.16174697192838156,
0.159861503742484,
0.17633579241510886,
0.16560336771279552,
0.23592272716407298,
]).reshape((100,))
IDENTITY_CENTERED_STUDENT_ABILITY_MEAN = np.array([
-1.53145329601,
-0.1037224805984333,
-2.5346325862340002,
-0.16659334202409193,
0.6892170580765797,
-1.6299039882208,
0.5759216326428854,
-0.6154707631470533,
-1.324495666496,
0.26573155400430587,
0.7652577314690523,
0.45544409607823655,
0.5238103874501941,
-0.07197163890352715,
-0.4030064082278912,
-0.06367757260774272,
1.683101206428,
0.22385358625675966,
-1.1513866702117999,
0.548573036566933,
0.95948719891944,
0.09658737436078371,
-0.25508354878122896,
0.3639320319599111,
0.13172360741438932,
1.0503049746552402,
-0.45477945750018983,
-1.2402264559735998,
0.024592244501932516,
-0.1869129383995491,
-0.20193764465813402,
0.7348782572302233,
0.3891360234108299,
-0.8764053289629732,
0.6220997721656951,
-0.5073001317697317,
0.5771299509476774,
-0.3244893565756618,
-0.5967372128658794,
0.8906601567501482,
0.5054662723805192,
-0.8233170183430492,
-0.94152715900854,
0.055102367547591215,
0.2875787010788121,
-0.11052142642368387,
-2.45172990148,
0.4821019754025249,
0.27328559476535663,
0.6733012052892752,
-0.5971436128798498,
-0.32638502085487964,
1.20211811798238,
0.007431183893937379,
-0.1617339404895533,
-0.5249321455865328,
-0.2957169904034686,
-0.18469260962941791,
0.18060040981340936,
0.35473971988064823,
-0.6641263517903532,
0.2525559187533776,
-0.7539204182644546,
-0.3329978298430479,
-0.23733742775601177,
-0.760337975514854,
-1.4409460175180002,
-1.8339906816679998,
0.30437866433659694,
0.40769991369333053,
-0.2635777799139569,
-0.024379979858648763,
1.191782711065,
1.8064755678079998,
0.08579855190709651,
-0.3495977374279837,
0.5307887255771849,
-0.32494071333992347,
0.9759680251663667,
-1.708893196914,
1.19285887578,
1.464530109598,
-0.047362513238781816,
-0.7175787668705759,
-0.3554173763286351,
0.7810222199058319,
0.0445688914121763,
-0.7058807897119578,
-1.4744592470979998,
0.96031031521877,
0.15924372958751498,
-0.4557929645883889,
1.49168204254,
-0.5948206225035152,
0.991245166309044,
-0.2852341028182611,
0.4404589654398972,
0.15386423849687042,
-0.72617522138649,
-1.20941258117032,
0.22926366029767,
0.5272633759166432,
0.1501467809611307,
-1.16153582106686,
1.5419183981584,
-1.3076265565060001,
-0.4238912646086758,
-0.014768106259741082,
-2.5742700496879993,
1.2329021759498002,
-0.3422795359497348,
1.1682863691695182,
-1.029187952559037,
-0.11244665804012433,
-1.6738904437400002,
-1.1391955747795999,
-1.0902649463342,
0.996376733766717,
-0.0036802422203173974,
0.8527621863064899,
-0.21480769984946807,
1.4014786028615398,
0.3742453176585132,
0.16925068096584048,
1.4265286825399999,
1.1742408021920803,
1.8449258754400002,
0.7205603994245529,
-0.23815166731438545,
-1.7780341281040002,
0.1620075008428395,
0.7325721512168284,
1.5654199944240001,
0.26359842124964705,
-0.46459631768461057,
-1.511834487334,
-0.1625684778689565,
-0.367417644599748,
0.6468749458098315,
0.1373447899928501,
0.9399144748567302,
0.6760378888460012,
0.11272579724195722,
0.8259415655944972,
-0.26474734528528693,
0.45917341156354796,
-1.459245022712,
-1.02317726654718,
1.978101300314,
-1.9986051595099998,
-1.68247514906,
0.5779341614433877,
-1.372992027352,
-0.32739473994169555,
-1.5945308304920003,
-1.0624877080104003,
0.9614425731603798,
0.6047045076063853,
-1.68376286521,
0.5589677921527343,
0.6137186195302284,
0.18753607337665934,
0.9890195621768738,
0.8095195434092533,
-0.3965986380923612,
1.377445820116,
-0.06838985422610416,
2.046548337596,
0.9164617542965511,
-0.3195634095572081,
1.0314516889874399,
0.7108559007045535,
1.862238528658,
1.3621603174541999,
-0.8267514363812998,
-0.02198228824454651,
0.7617448542479179,
0.44623613896832126,
-0.4158880793516606,
2.4915407061940003,
0.5118227624437395,
0.16764792450445937,
-1.097660859342,
-0.48194520472199337,
-1.2524319852227999,
-1.3287868312196,
-0.934167280992282,
-0.006831081480208742,
-0.03926775662275091,
-1.35273229923,
1.547476519466,
1.9184099788800002,
0.4773887663715793,
-1.02138403404478,
0.8504069605922153,
-1.737402300094,
-1.681245128506,
-1.004367689798912,
0.6467059961667293,
0.29001215231698685,
0.3648311896598303,
-0.26138861419812937,
-2.279267307414,
-0.0906688334915982,
-2.031979872194,
-2.3078282275440003,
-1.2519677743818,
0.7470155445163321,
-0.5818845335287651,
0.08863238859381536,
0.96987595096648,
-0.7213854069945482,
-0.2012063756434957,
-1.2219291561363002,
-1.5417369252720001,
-0.545967865179587,
0.8798168529091672,
-1.7416170762179999,
-1.2491089371948,
1.426395042546,
1.784180046686,
-0.18744309517350596,
1.2823936394620001,
-0.5633369317002368,
2.1333581553420005,
-0.04484150799137086,
1.35685118387,
1.439285213142,
0.8845448792462859,
0.6252844100642271,
0.909542824717998,
0.5839097948280497,
-1.7248625863540004,
0.21788342843553538,
-0.6435830284776212,
1.3210799375539999,
0.1415838021001044,
1.8362484038000002,
-0.2019487394330007,
-1.6032595213639997,
-1.0205622517486,
0.08421743324607722,
-0.5810183405009883,
1.0852629189064,
-0.7776757258059481,
0.21938508585720937,
-1.2181636793408,
0.8204301774150642,
-1.522501076094,
1.4530306969039999,
2.235335521786,
0.5477739229160095,
0.35840048782709544,
1.0167107552091,
-1.1864072267484,
-0.12236532921975521,
0.1337068970997462,
0.2141363627733126,
-0.22390538554576436,
0.93889574165009,
-0.05000929121623153,
1.7368487523799998,
-1.07996802297094,
-1.28701371198194,
0.5689810048889178,
-0.6128465630766041,
0.3370081809060027,
-0.4519495649238655,
-0.37731409274448513,
0.8724509235420881,
0.5619742599209652,
0.6004137651405483,
-0.576758348260154,
-0.8571399339024343,
-0.25358060126713355,
-0.011058305219536048,
0.050647048616681165,
-1.3917808357538,
1.45607680543,
-0.0879312861849905,
-1.654818923574,
1.555989578466,
2.287911787794,
0.049078671349419956,
1.10820104755586,
-1.0565198412581478,
-0.48633814120482877,
-0.096486118539407,
0.840825936870066,
0.409057122325647,
-0.09663701001849348,
-0.22716427462073688,
-0.9052941303019708,
-0.48155094190290687,
0.10716636791521161,
-0.9191790975503323,
-0.5462467839702463,
1.5756277384039998,
1.589474114636,
1.0643347737643203,
-1.7413775472159998,
-0.029742328333474354,
-1.082055631271144,
-1.003215040359684,
-0.24980362354872584,
-0.24579682722733484,
0.6441752072047096,
1.5448193510200003,
0.4303396845693279,
-0.9798921964558998,
0.09703427879389323,
-0.9501522859627786,
-0.22866924287891927,
-0.8176927980886936,
-0.020132374965521324,
0.25133523889129894,
-0.30283341985744244,
1.1134931961458803,
-0.6693667692775632,
0.47177153321636406,
-0.17964803201251905,
-0.6202410169739533,
-0.12483988879780886,
1.1535818084061,
0.2437626226045569,
0.05486687186624295,
0.2576491588469953,
-1.5055650762579995,
0.3306273076220177,
-1.2455774615348,
-0.2363778988814813,
1.278181048935852,
2.0080525314939996,
-0.9766116511207219,
0.046840084894934284,
0.0059391695437916595,
-0.6038452662638061,
-2.400121061816,
-0.9982711311217599,
1.9224614175519998,
-1.142826721446204,
-0.12573405741197605,
-0.947665710873094,
0.6661696920140205,
0.4715697391822512,
-0.8385178105043781,
-0.007437700784033481,
-1.1493230316026,
-0.5275137858395749,
1.3799628333996,
0.5954848667305329,
-0.22227881062663765,
-0.31437605125491186,
0.5469087128027968,
0.6068641291515311,
0.2855400400124261,
-0.8716571643172324,
-0.07494768277257863,
0.4160823900674475,
-0.09069102924585433,
0.16808100918351976,
1.1672069613254001,
0.4087921158301694,
0.35216162425810726,
0.3641058507879814,
-0.6930345286539217,
0.660464211651983,
0.4886364312786261,
0.2282901088633266,
0.2901760382059634,
0.4111575398698338,
0.3289383994245535,
0.7085954448202112,
0.958119369306554,
-1.1877120774862,
0.5490385521521295,
1.679664647616,
0.5038420360679283,
-1.1671936104944003,
0.027194053939753444,
-0.2589797922975594,
0.38039782810007666,
0.14086498991109658,
0.37141237054180143,
1.1277811727083802,
0.1070007410622342,
-1.2661088301419998,
1.559182573428,
1.4419045370119998,
0.5013072236186092,
-1.537873881282,
-1.2839242075015997,
-0.29227794986234923,
-1.399358003376,
-0.4731158765279423,
-0.3554884864521684,
-0.7558993274806871,
-0.6073432014296761,
0.39718177205072713,
0.4216300384822055,
]).reshape((400,))
IDENTITY_CENTERED_STUDENT_ABILITY_MEAN_STANDARD_ERROR = np.array([
0.0003807302090591504,
0.0003522319768193471,
0.0004612581242175986,
0.00037233459890380496,
0.00036813477007659065,
0.00040346353947745594,
0.00037716760132062565,
0.00036440615018271604,
0.00038338353360423095,
0.00035551931808240096,
0.0003878285060591543,
0.00036553540889735944,
0.0003672849144539399,
0.0003559158675279467,
0.00036774637765694685,
0.00035584316175129406,
0.0004100652510117891,
0.0003614737870315838,
0.0003763080829250638,
0.00036258399122052497,
0.00037062337188633026,
0.00035952242226359614,
0.00034949770550229606,
0.00036135250077889904,
0.0003507853040426144,
0.0003739014924998915,
0.00035925606627960937,
0.0003918742701085739,
0.00036488097277403373,
0.0003539950197775601,
0.00035788937003609407,
0.0003810531574596075,
0.0003530930989026296,
0.000373652718691511,
0.000383935808921923,
0.00035974094297714005,
0.00035945313729555576,
0.000356307484630763,
0.0003600378876822727,
0.0003719156257651956,
0.00037539407688620056,
0.0003598352772400606,
0.0003675260846244555,
0.00037320139465658896,
0.00037363121862067964,
0.00035568866619706724,
0.000455593887580432,
0.00037463987029125173,
0.0003529895206765149,
0.00036433494782583915,
0.00036675435173932913,
0.0003642664467200116,
0.00038735721352350444,
0.000370165099303707,
0.00035481420011045827,
0.0003666255174125307,
0.00036725408498696865,
0.0003668077438855692,
0.00035567673907868766,
0.000358763844540175,
0.000369377234552482,
0.00036942748008708535,
0.0003617971386527043,
0.00035199369056704017,
0.0003600321834619692,
0.0003641632702028392,
0.00038046767184268533,
0.00040095450783722965,
0.000369067240014855,
0.00036255496810369184,
0.00036965259314812864,
0.0003582663673375366,
0.00037383361304930767,
0.0004087152848087334,
0.0003614546506474394,
0.0003540879274365029,
0.0003610307423805708,
0.00036035278020724786,
0.00036896278024262644,
0.0004135925755213107,
0.0003723170552022848,
0.000381127959602535,
0.00035709960118430513,
0.00036550104612138016,
0.00036400837309123486,
0.0003682639794316729,
0.00035697903982153577,
0.0003725223269775804,
0.0003913300917201865,
0.0003849373110788112,
0.0003597848118033653,
0.00037200795208787305,
0.00039898713244806066,
0.0003672184918186208,
0.00036814988237644336,
0.00035101942382774584,
0.0003679460362332782,
0.0003526543221436749,
0.0003678829050565934,
0.00038747124574698094,
0.00035415662376175265,
0.0003762103071361496,
0.00035819290608919674,
0.00038700899517328573,
0.0004016959254991072,
0.00037461980749164936,
0.0003678306991589346,
0.00035138047158923347,
0.0004567454202688617,
0.0003799345604212541,
0.00036245215041054375,
0.000386248142688457,
0.0003688397776543128,
0.00036352189158027923,
0.00040052048371455904,
0.00037953770231504834,
0.0003805847868712634,
0.0003653802170589804,
0.000364608962886792,
0.00037826215143391644,
0.0003651780171923358,
0.0004012466537631555,
0.0003765363298664247,
0.00036275617949365377,
0.000380564531763498,
0.0003793092227224019,
0.0004240988403902765,
0.0003617351088771606,
0.0003666715898575965,
0.0004052767112719767,
0.00036621383398398875,
0.0003696887506493269,
0.0003984845256617444,
0.0003677710693980309,
0.0003621443520454881,
0.0003950132149528491,
0.0003432926613372743,
0.0003641850557654326,
0.0003676036258547077,
0.0003588796772385077,
0.0003682293331372077,
0.0003660702457507916,
0.00035516638627752,
0.000370852192373612,
0.00035310276189749284,
0.0003714396274595556,
0.0003790063312430169,
0.0003759663040796095,
0.00040772126780319785,
0.00040366879952233314,
0.00039506613767557056,
0.00036291280849009834,
0.00037525308260390856,
0.00036414631519378685,
0.0003955039932472967,
0.00037762600352939335,
0.0003699447965960905,
0.00037461162440837815,
0.0004075696833431024,
0.0003689658866572577,
0.00034949146586574063,
0.00035882019341426066,
0.00037662952086823685,
0.0003719831645069374,
0.00036382089718362833,
0.00038929620653461246,
0.00035387697589122524,
0.0004381846293264442,
0.0003664333752466361,
0.00036054828637699274,
0.000371383877655576,
0.0003709123062027061,
0.0004168290118117364,
0.00039517545692547564,
0.00037226316697589753,
0.00037034688815825925,
0.0003697448537950147,
0.0003609366563049233,
0.00036345888618116647,
0.00046859265761287647,
0.00035721704117589565,
0.0003663697501293001,
0.00037782899071418173,
0.00035671820586256057,
0.0003891940868550363,
0.00037878718171461965,
0.00038371942337721564,
0.0003580556436759882,
0.0003630539024365288,
0.0003854670288165862,
0.0003914478014214124,
0.0004163329814745157,
0.00036460977031165957,
0.0003687311306047335,
0.00038518911923230337,
0.000412893566103962,
0.00039652549633799436,
0.00038794730288420456,
0.0003704237010648445,
0.00036799602571981364,
0.0003604353639146432,
0.00036547742928468545,
0.0004555573092694083,
0.00035075158947902555,
0.0004208422868677193,
0.00043741350915649674,
0.00037435552675476333,
0.0003636880184436686,
0.0003562922353409024,
0.0003719484411337716,
0.0003728730973876976,
0.0003741061821975788,
0.000365773818221504,
0.00038179893805347645,
0.0003805261674614682,
0.00036699822153150587,
0.0003698498905112882,
0.0003903428900367587,
0.00037725081915066346,
0.0004044262640293855,
0.00041860787526739517,
0.0003577066501726797,
0.00037892621734971133,
0.000362959011114616,
0.00045002118491843505,
0.00036380851700783663,
0.0003773511300547562,
0.0003917472586305837,
0.00036277610092309246,
0.00038314393650407336,
0.0003688956843713889,
0.00036309555337999256,
0.0004265472404619288,
0.00035703504744172,
0.00036221099190272013,
0.0003820099813639321,
0.00036334876423314085,
0.00042931278296406896,
0.000364806758778768,
0.0003770688886557176,
0.00036730304437165926,
0.00035816051675740537,
0.0003541142792497701,
0.0003881009361259791,
0.0003725741170525413,
0.0003718129053264026,
0.000370270557700957,
0.0003758347558785034,
0.0003961814955513416,
0.000385312488406763,
0.0004392452018197106,
0.00036869036616538926,
0.000374666153433015,
0.00036288941115501946,
0.00037584714700667534,
0.0003640704737019748,
0.0003644895411918226,
0.0003538984814201486,
0.00036472011466662517,
0.0003689003788535386,
0.0003691853799068832,
0.000416380299337264,
0.00036190811348160023,
0.0003753197361449902,
0.0003672347995766353,
0.0003675511098308543,
0.00035352129408548885,
0.0003585069108085392,
0.00036599380952045913,
0.00036350510576376344,
0.000371432758361592,
0.00036866506480644266,
0.00037053853107533996,
0.0003572077295874529,
0.0003683914801964893,
0.0003648159405076817,
0.0003562655047625727,
0.000403375322273615,
0.00039949338681271235,
0.000379102265376018,
0.000409931871492805,
0.0003932527531735471,
0.000436597784539377,
0.0003600490616658104,
0.0003891875674337495,
0.00036957226478249154,
0.00035342782946904154,
0.0003626533840942856,
0.0003702441341222138,
0.0003643505758639582,
0.0003536051714895676,
0.000377525436297279,
0.0003830511209776495,
0.0003596789889391157,
0.00036379582672639246,
0.0003715530663200229,
0.00037315439798680746,
0.0004017409398717018,
0.0004109307223246848,
0.0003820512689593424,
0.0003973311157613765,
0.0003569094217948375,
0.00037551505993846536,
0.0003701537878040545,
0.00036072686122383516,
0.0003523569290086269,
0.00036750463325126013,
0.0003949306587657855,
0.00037003876828469655,
0.0003682657333053287,
0.00036072069153839537,
0.0003783540918314163,
0.0003652452639984381,
0.00036910287850852667,
0.0003617361977340996,
0.000371069664140319,
0.0003636867776412704,
0.0003733552646810362,
0.0003642719885312381,
0.00036295018275303527,
0.0003606321467873895,
0.00037426628888793744,
0.00037300664834235284,
0.0003784877139382366,
0.00035635643204248285,
0.0003635298932313777,
0.00037387964614753134,
0.0003848988576014691,
0.00037040846085119734,
0.000377322149229602,
0.0003726480391635172,
0.00038244295350270304,
0.0004255783290901482,
0.00037102506740369746,
0.00036130410686998867,
0.00034978139532361495,
0.00037029564103098977,
0.00042911874148569666,
0.00037604008635943334,
0.0004454009481920099,
0.0003901307038984549,
0.0003757699834126286,
0.000370912844545672,
0.00037032189045495696,
0.00036605897754326216,
0.00036012527650128014,
0.00036192401388643563,
0.000363925013289729,
0.000364603469657287,
0.00039076439653393914,
0.00036094082283042,
0.00035873139418905465,
0.00036627932170654363,
0.0003750214573050499,
0.00036434729367850003,
0.00035878891413729876,
0.000374899005486041,
0.0003665897964713611,
0.00035388747073936556,
0.0003575134823336028,
0.000359987621262818,
0.0003776514377249224,
0.00036239457382840335,
0.00036508855372201405,
0.00035228063202594965,
0.00037474523932526366,
0.00035892474517833266,
0.0003660691816453079,
0.00036579093656706306,
0.0003696000194876975,
0.0003657733844544186,
0.00036072720584771613,
0.00037543824926729915,
0.00036176653791076,
0.00038761400074055437,
0.0003649893982094239,
0.00038546866508923095,
0.00036532735340006104,
0.00035523125391524686,
0.00036757872019585306,
0.0003591065221869966,
0.00035769186496412706,
0.00036089089661395295,
0.00037555550743583155,
0.0003824322930941847,
0.00035958346298408116,
0.0003707048052761983,
0.00040627179899909913,
0.0004005050949485239,
0.000374085983650575,
0.00038223179110346916,
0.00038092474173882253,
0.00035763246212672597,
0.0003771685540587141,
0.0003624347182018015,
0.0003603300710283619,
0.00036041302648390065,
0.0003718109978406836,
0.00037141759129414963,
0.00036169298560581126,
]).reshape((400,))
IDENTITY_CENTERED_STUDENT_ABILITY_STANDARD_DEVIATION = np.array([
0.2791090859688944,
0.24336611422837376,
0.36488692494203334,
0.26018895473099024,
0.2511594289001522,
0.2986992866573224,
0.25762706405510805,
0.2570941822037678,
0.28273088076979863,
0.25691498392195766,
0.2695415193098069,
0.26260821352992314,
0.2604856275626116,
0.24721600067764782,
0.2610540342671481,
0.24720639782118434,
0.30855546810084217,
0.25856536828353893,
0.2758506049677941,
0.25574614430139153,
0.2708123083380415,
0.24500437243787507,
0.24135314849422698,
0.2560154401471106,
0.24132530628703272,
0.2695215283822754,
0.24872132141141604,
0.28540385895697995,
0.25121663717159554,
0.24315075978233,
0.24993846595083263,
0.27188832402009727,
0.23610580622883276,
0.2748067175419838,
0.2794311787777723,
0.2525963319198097,
0.24498433483221435,
0.2550849692565974,
0.25056828587590957,
0.2730984535597845,
0.2525550378893956,
0.25705979784826855,
0.25336774670357964,
0.2625639509250063,
0.2615016961696125,
0.2478446098247033,
0.3706619032362402,
0.2721104219396517,
0.2547605741351998,
0.25744218189656326,
0.2535551772648442,
0.255151893725088,
0.28326556513292955,
0.2570822971742373,
0.24469798086476863,
0.25185648939454486,
0.24558990083321083,
0.26324598590824116,
0.25293007894185804,
0.2439706424342551,
0.268124375948917,
0.2552230492834867,
0.2577849693131578,
0.24563382105052134,
0.26077683404238794,
0.2675695661856813,
0.27765626662304654,
0.3040410369250185,
0.25804948426308677,
0.2491917842488701,
0.2454533427973807,
0.24634281409225173,
0.2725895760070548,
0.31487922691516385,
0.24717719451828865,
0.24078790971335592,
0.24438981907803076,
0.2534966611673324,
0.2662138910649684,
0.3056252061333474,
0.26715718769129265,
0.2741468282110619,
0.2526001189473998,
0.2633496782137194,
0.2545974734718209,
0.26723717309701345,
0.24399931887365622,
0.25696373939435296,
0.28618043109070734,
0.27926978390379015,
0.2515093817147499,
0.25826754093078336,
0.3008294466251549,
0.2512858187655407,
0.2687126304923245,
0.24095702987274095,
0.25881204528138174,
0.24070902223659446,
0.25852272504393314,
0.2781185408770773,
0.24799855391507072,
0.26581376077742214,
0.24628475111659723,
0.2810258704677224,
0.30141788587509755,
0.27488261879896536,
0.2554797610258617,
0.24724239271687729,
0.3653993104787058,
0.28313807441613215,
0.24631215611434137,
0.2855485842686902,
0.2620100011768667,
0.25069081956572015,
0.298899897642608,
0.2754209546257453,
0.27109833160368835,
0.26293191110933,
0.25202914312623703,
0.2739028748721043,
0.2564665943891778,
0.305086977681739,
0.25884774164269786,
0.2582601530908254,
0.2799436296926646,
0.27497089417026327,
0.33025678971602535,
0.2550035051722592,
0.2563868018180513,
0.30410687146615123,
0.256154575553928,
0.26712953038055465,
0.2984487116991167,
0.25745216414985556,
0.2610758337740001,
0.29939234482885696,
0.24101448794144797,
0.25035831861484253,
0.2645405993048917,
0.25099771928256265,
0.26663483147590933,
0.26519591337455206,
0.2509018861298411,
0.2741645501661662,
0.23909640522841008,
0.2650778697941769,
0.27156648081833945,
0.27237682823419307,
0.3137881383151027,
0.3063628120099805,
0.2914831884100581,
0.25223772913353476,
0.2735979015246753,
0.2580483556449678,
0.2987255590983629,
0.28126878008555734,
0.2649849782034522,
0.26538866638713493,
0.3076189078674919,
0.26538170177042175,
0.24289705790737148,
0.25300916927231654,
0.2706830440046038,
0.26416941760162177,
0.2510320672357696,
0.27998417088863464,
0.24166184753502926,
0.3472118868257862,
0.26256072596398783,
0.24897199442935883,
0.27418886360985234,
0.27091568262890026,
0.32045013392765165,
0.282568610532477,
0.2597404735808616,
0.257989842363703,
0.2681895254569828,
0.2625635437839696,
0.26055110922162733,
0.383911029207819,
0.26050244444017084,
0.2570758800753336,
0.26984726430607187,
0.25340432729271867,
0.28656054354632904,
0.2690441819506373,
0.2737299430614805,
0.2513214610100176,
0.2533661129830128,
0.2828849149080148,
0.2915563039217558,
0.3205484300367759,
0.2512688007881092,
0.26294404787989717,
0.26892740261652537,
0.32287700571997097,
0.29165485945053377,
0.27361430788308216,
0.2532239373678148,
0.2630272919883222,
0.25752669177127135,
0.2561164144476695,
0.3557402358006939,
0.24408331910983216,
0.325702771429707,
0.3427785403967338,
0.2727648898741722,
0.25962426154561036,
0.2500984680653627,
0.2618965985022919,
0.26479723913685904,
0.2681486456822141,
0.24872645468050741,
0.2758038957731256,
0.28856832250567843,
0.2673669580224761,
0.25510514852447147,
0.2918063322397543,
0.27023096787145817,
0.29657132145663023,
0.3212807517200081,
0.2493591205917213,
0.2739309182704085,
0.25266005392607205,
0.3487501679388922,
0.2530649492022645,
0.27646954058407697,
0.28596744434967053,
0.25866921851209124,
0.2773964446508175,
0.2697509713809395,
0.2647393536163582,
0.3220689838490454,
0.25036331807135603,
0.2626897216133877,
0.27864522371495615,
0.25629503857968466,
0.33234784562285385,
0.2602837553873588,
0.28233041034198236,
0.2708837068348031,
0.25089398525771955,
0.24591899289512612,
0.2937185434300251,
0.2673630861190074,
0.2607150390963616,
0.2682987961935724,
0.26534550596617346,
0.2905842246100582,
0.283167959537041,
0.3423514636691657,
0.27125697418902456,
0.26207846631569626,
0.2679342602933034,
0.2728067553498931,
0.24930845660634882,
0.2629162752425466,
0.24261711388578666,
0.2565257584469081,
0.2650410869025861,
0.25197080340934186,
0.31075255559930637,
0.25854116404301597,
0.2736684502877284,
0.2620243832090361,
0.2613185406380435,
0.2552143432033847,
0.2433824816143216,
0.2538261414397999,
0.25982557184073396,
0.2632873040084761,
0.2667663027932888,
0.2609566047817372,
0.25940820099040574,
0.25707444776657595,
0.2565239065210773,
0.24523338738109116,
0.2994858773613719,
0.2948700075095576,
0.27082530936180527,
0.30627066796015356,
0.2931662517611125,
0.3545184965520302,
0.24866625666453118,
0.27861071935216986,
0.2639965032512805,
0.24459360345125383,
0.26183654205577805,
0.2675007295508,
0.25335054538511365,
0.25528097101686537,
0.26484700344330964,
0.28138650919044633,
0.2498944329341119,
0.2604563875916717,
0.26166882295507404,
0.2608050264872015,
0.2998548539301914,
0.3075410373041991,
0.28122010210308857,
0.29700323571126364,
0.25857892600817073,
0.27693549200200995,
0.2614167920718378,
0.25227843203123085,
0.2450179702732253,
0.26729321673951995,
0.2975012692503899,
0.2613424411686331,
0.2673662572079564,
0.2574412888082601,
0.2742384855106804,
0.2547892897433899,
0.2708322212449492,
0.2556519665561132,
0.2625410816300757,
0.26972310164382823,
0.2698529278785143,
0.2569436227082871,
0.2529405660275751,
0.25397117342987763,
0.26872874728630036,
0.2677297908348233,
0.27378847375549,
0.2456498721226803,
0.2578097780284807,
0.2583542154415034,
0.2919876422728699,
0.26217618142653837,
0.274337128803326,
0.2582892645892646,
0.2732577979943262,
0.3374419829795458,
0.26357909089067666,
0.24926309807113575,
0.24219165523765046,
0.25702593802716,
0.3382896728648203,
0.2720436608257121,
0.35302987925578344,
0.28662665818642685,
0.264700483789464,
0.2619217992190076,
0.26537366001751816,
0.2546191007394235,
0.2571973682122088,
0.24790933645942959,
0.2612423406707779,
0.25260614575056983,
0.29937532659953925,
0.250236033168029,
0.25093676665287334,
0.2520000659806788,
0.2584095733583529,
0.26174686311024714,
0.25257421403730895,
0.2618658313213285,
0.2591516195561422,
0.2463739702873769,
0.2546007777593006,
0.2550097758403467,
0.27689494135681864,
0.2543644941373353,
0.25890700383357645,
0.24656132398443947,
0.26504416301906913,
0.25973220772639954,
0.2571657464776395,
0.25249577369984494,
0.2582673768170921,
0.2525367220756647,
0.24756331138521995,
0.27397364914823213,
0.26531392689606953,
0.2777931390263465,
0.25719766678363903,
0.2983622368770872,
0.2558656717901,
0.2536996834148389,
0.2520603133326972,
0.2516849070915258,
0.2601904867135497,
0.2556761106007976,
0.26262709131134687,
0.28331062764652437,
0.2581051312929231,
0.2671924923775289,
0.3060407132458062,
0.2998580192800292,
0.2757486432174753,
0.27936044376100627,
0.2789154591086189,
0.24838324677018875,
0.2765160229171123,
0.24841840617501038,
0.2539376325457054,
0.25798554006782065,
0.257360396807066,
0.26017033675196327,
0.2508344710663564,
]).reshape((400,))
|
import json

# Sample payload: a list holding a name record and a birth-date record.
data = """ [
{
"First_name":"Cristian",
"Second_name": "Santiago",
"Last_name": "da Silva"
},
{
"day": "31",
"month": "October",
"year": "1991",
"age": "29"
}]
"""
info = json.loads(data)
# Bug fix: the label "Idade" means "age", but the original printed the day.
print('Idade:', info[1]['age'])
|
import pandas as pd

# Build a correlation matrix of the trait columns and write it out as CSV.
# Bug fix: the original path literal ended with a doubled quote ('' ) which
# is a SyntaxError; it is removed here.
# NOTE(review): the path has no leading '/' so it resolves relative to the
# current working directory -- confirm the intended absolute macOS path.
my_data = pd.read_csv('Users/meyhel01/Documents/Traitify Excel/traitifydata.csv', sep=',', engine='c')
# NOTE(review): `traits` is not defined anywhere in this script; it must be
# supplied (imported or defined) before this point, presumably an iterable of
# column names.  sorted() already returns a list, so the append loop is gone.
labels = sorted(traits)
frame = pd.DataFrame(my_data, columns=labels)
frame = frame.corr()
frame.to_csv('corr_matrix.csv')
|
# -*- coding: utf-8 -*-
from datetime import datetime
from flask import Flask, jsonify
from flask_restful import Resource, Api
import logging
"""
time-api
"""
app = Flask(__name__)
# Pretty-print JSON responses unconditionally (handy when testing with curl).
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
api = Api(app)
# Quiet werkzeug's per-request INFO logging; warnings and errors still show.
log = logging.getLogger('werkzeug')
log.setLevel(logging.WARNING)
class HelloWorld(Resource):
    """Trivial health-check resource for the service root."""

    def get(self):
        # Flask-RESTful serializes a plain dict to a JSON response.
        payload = {'msg': 'Hello World!'}
        return payload
class TimeV1(Resource):
    """Version 1 of the time endpoint: current local time, ISO-8601."""

    def get(self):
        stamp = format(datetime.isoformat(datetime.now()))
        body = {'datetime': stamp, 'version': 1}
        return jsonify(body)
# Route registration: health check at the root, time endpoint under /api/v1/.
api.add_resource(HelloWorld, '/')
api.add_resource(TimeV1, '/api/v1/')
if __name__ == "__main__":
    app.run()
|
#!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2009-2012, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
"""Convert all TIFF files output by reconstruct into JPEG files viewable
in a web browser for debugging."""
import os
import stat
import re
BASE_DIR = '%s/projects/StereoPipeline/reconstructTest' % os.environ['HOME']
RESULTS_DIR = '%s/results' % BASE_DIR
COLORMAP = '%s/projects/VisionWorkbench/build/x86_64_linux_gcc4.1/bin/colormap' % os.environ['HOME']
def getTime(f):
    """Return the modification time of *f* in seconds since the epoch.

    Files that are missing or unreadable sort oldest: 0 is returned.
    """
    try:
        return os.stat(f)[stat.ST_MTIME]
    except OSError:
        return 0
def dosys(cmd):
    """Echo *cmd* and run it through the system shell.

    The exit status is ignored; returns None.
    """
    # Parenthesized print works identically under Python 2 (single
    # parenthesized expression) and Python 3, unlike the original bare
    # `print cmd` statement which is a SyntaxError on Python 3.
    # Echo-before-exec so the log shows every external command that ran.
    print(cmd)
    os.system(cmd)
def convert(tif, opts):
    """Produce a browser-viewable .jpg next to *tif* if it is stale.

    Returns 1 when a conversion was performed, 0 when the .jpg was
    already up to date.
    """
    stem = os.path.splitext(tif)[0]
    out = stem + '.jpg'
    # Guard clause: nothing to do if the .jpg is at least as new, unless -f.
    if not (getTime(tif) > getTime(out) or opts.force):
        return 0
    if re.search('dem', tif, re.IGNORECASE):
        # file is a 16-bit TIFF DEM -- colormap to tif then convert to jpg
        tmp = stem + '_colormap.tif'
        dosys('%s %s -o %s' % (COLORMAP, tif, tmp))
        dosys('convert %s %s' % (tmp, out))
        dosys('rm -f %s' % tmp)
        return 1
    # file is an 8-bit TIFF -- contrast stretch and convert to jpeg
    dosys('convert -contrast-stretch 0x0 %s %s' % (tif, out))
    return 1
def doit(opts):
    """Find every .tif under RESULTS_DIR and convert each to JPEG.

    Prints a summary of how many files were already up to date.
    """
    tiffsName = '/tmp/convertAllTiffs.txt'
    # NOTE(review): the glob should be quoted ('*.tif') -- if the current
    # directory contains .tif files the shell expands it before find sees it.
    dosys('find %s -name *.tif > %s' % (RESULTS_DIR, tiffsName))
    # file() is the Python 2 builtin constructor; this script is Python 2.
    tiffsFile = file(tiffsName, 'r')
    # strip the trailing newline of each line from find's output
    tiffs = [line[:-1] for line in tiffsFile]
    tiffsFile.close()
    numConverted = 0
    for t in tiffs:
        numConverted += convert(t, opts)
    print '%d of %d images were up to date' % (len(tiffs) - numConverted, len(tiffs))
def main():
    """Parse the command line and run one conversion pass."""
    import optparse
    parser = optparse.OptionParser('usage: convertAll.py')
    parser.add_option('-f', '--force',
                      action='store_true', default=False,
                      help='Convert all files, even if they are up to date')
    options, extra_args = parser.parse_args()
    doit(options)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test string manipulation
"""
import os
import commonlibs.strings.strings as s
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_FILE_a = os.path.join(HERE, 'a.txt')
def test_is_string_in_file():
    """The needle "phone number" must be found inside the fixture file."""
    found = s.is_string_in_file("phone number", TEST_FILE_a)
    assert found is True
|
# -*- coding:utf-8 -*-
import urllib2
# Build an HTTPHandler processor object that supports plain HTTP requests.
# http_handler = urllib2.HTTPHandler()
# Passing "debuglevel=1" to HTTPHandler turns on debug logging, so the
# request/response packets are printed while the program runs.
http_handler = urllib2.HTTPHandler()
# build_opener() constructs a custom opener from the handler object.
opener = urllib2.build_opener(http_handler)
request = urllib2.Request("http://www.baidu.com/")
response = opener.open(request)
print response.read()
|
# Class to store information about a known exploit.
# This code doesn't actually exploit anything.
# These have members
class exploit():
    """Container for metadata about a known exploit.

    This code doesn't actually exploit anything; it only stores a title,
    description, related URLs, free-form info fields, and reference numbers.
    """

    def __init__(self):
        self.title = None          # short name of the exploit
        self.description = None    # longer free-text description
        self.urls = []             # related links
        self.info = {}             # arbitrary info, keyed by info type
        self.refnos = {}           # reference numbers, keyed by reference type

    def set_title(self, title):
        self.title = title

    def get_title(self):
        return self.title

    def get_msno(self):
        """Return the Microsoft bulletin number, or None if not recorded."""
        # dict.get replaces the membership-test-then-index pattern.
        return self.refnos.get('MS Bulletin')

    def get_description(self):
        return self.description

    def add_refno(self, reftype, ref):
        self.refnos[reftype] = ref

    def set_info(self, inftype, info):
        self.info[inftype] = info

    def get_info(self, inftype):
        """Return the stored info for *inftype*, or None if absent."""
        return self.info.get(inftype)

    def add_url(self, url):
        # TODO uniq
        self.urls.append(url)

    def as_string(self):
        """Print a human-readable summary (despite the name, returns None).

        Parenthesized print calls behave identically under Python 2 and 3,
        unlike the original Python-2-only print statements.
        """
        print("Title: %s" % self.title)
        if self.description:
            print("Description: %s" % self.description)
        if self.urls:
            print("URLs: %s" % " \n".join(self.urls))
        for k in self.info:
            print("%s: %s" % (k, self.info[k]))
        for k in self.refnos:
            print("%s: %s" % (k, self.refnos[k]))
        print("")
|
import os
import pandas as pd, numpy as np
from rudolf.paths import RESULTSDIR
from rudolf.plotting import _get_detrended_flare_data
# Summarize flare statistics from the cached detrended light curve:
# flare duty cycle above an amplitude cutoff, plus how many flares line up
# with the orbital period ("successors").
flaredir = os.path.join(RESULTSDIR, 'flares')
# read data
method = 'itergp'
cachepath = os.path.join(flaredir, f'flare_checker_cache_{method}.pkl')
c = _get_detrended_flare_data(cachepath, method)
flpath = os.path.join(flaredir, f'fldict_{method}.csv')
df = pd.read_csv(flpath)
# Keep only flares above this recovered-amplitude cutoff.
# NOTE(review): presumably relative-flux units -- confirm against how
# ampl_rec is computed upstream.
FL_AMP_CUTOFF = 5e-3
sel = df.ampl_rec > FL_AMP_CUTOFF
sdf = df[sel]
tot_dur = np.sum(sdf.dur)
N_flares = len(sdf)
N_flares_with_successor = len(sdf[sdf.has_Porb_successor])
N_flares_with_candsuccessor = len(sdf[sdf.has_Porb_candsuccessor])
# Observing baseline spanned by the light curve, in days.
t0,t1 = min(c.time), max(c.time)
tobs = t1-t0
print(f'In {tobs:.1f} days observed...')
print(f'Saw {N_flares} flares > {FL_AMP_CUTOFF:.2e}, spanning total of {tot_dur:.3f} days.')
print(f'->Duty cycle: {100*tot_dur/tobs:.3f}% of time there is a flare above this amplitude.')
print(f'Of these, {N_flares_with_successor} with Porb successor')
print(f'... and {N_flares_with_candsuccessor} with Porb cand successor')
|
# -*- coding: utf-8 -*-
# Copyright 2019 Mathijs Lagerberg.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import constants
from View import View
import locale
from datetime import datetime
# strftime patterns used by the clock display.
DATE_FORMAT_LONG = "%a, %d %b %Y"
# NOTE(review): "%-d" (day without zero padding) is a glibc extension and is
# not portable to Windows strftime -- confirm the target platform.
DATE_FORMAT = "%-d %B"
DAY_FORMAT = "%A"
#LOCALE = 'nl_NL'
class ClockView(View):
    """Console-only view showing the current date and time.

    The date line is printed once per day, the HH:MM line once per minute.
    """

    # Sentinels guarantee the very first render() prints both lines.
    previous_minute = -1
    previous_day = -1

    def __init__(self, epd = None):
        View.__init__(self, epd)
        #locale.setlocale(locale.LC_ALL, LOCALE + '.utf8')

    def get_date_format(self):
        return DATE_FORMAT

    def get_day_format(self):
        return DAY_FORMAT

    def render(self, draw):
        moment = datetime.today()
        if moment.day != self.previous_day:
            # A new day started (or this is the first render): print the date.
            print(moment.strftime(DATE_FORMAT))
            self.previous_day = moment.day
        if moment.minute != self.previous_minute:
            print('{h:02d}:{m:02d}'.format(h=moment.hour, m=moment.minute))
            # Full refresh requested when the minute wrapped around the hour;
            # this must use the OLD previous_minute, so set it first.
            self.update_fully = moment.minute < self.previous_minute
            self.previous_minute = moment.minute
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# read data -- pass `sep` by keyword; the second positional argument of
# read_csv is deprecated in modern pandas
df_intrinsic = pd.read_csv('./intrinsic_velocities.tsv', sep='\t')
df_initial = pd.read_csv('./velocities_at_x.tsv', sep='\t')

# do plotting
fig, axes = plt.subplots(2, 2, figsize = (12, 7))

# sorted by coefficients of friction \mu
for key in df_intrinsic.columns[1:]:
    # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0
    data = df_intrinsic[key].values.reshape(19, 3)
    lambdas = data[:, 0]
    angular = data[:, 1]
    transversal = data[:, 2]
    axes[0][0].plot(lambdas, angular, 'o', label = r'$\mu = %s$' % key)
    axes[0][1].plot(lambdas, transversal, 'o', label = r'$\mu = %s$' % key)

# sorted by initial angular velocities \dot{\phi}
for key in (r'\frac{2}{3}\pi', r'\pi', r'\frac{3}{2}\pi', r'2\pi',
            r'\frac{5}{2}\pi'):
    data = df_initial[key].values.reshape(21, 3)
    pos = data[:, 0]
    angular = data[:, 1]
    transversal = data[:, 2]
    label_template = r'$\dot{\phi}_0 = %s \frac{rad}{sec}$'
    axes[1][0].plot(pos, angular, 'o', label = label_template % key)
    axes[1][1].plot(pos, transversal, 'o', label = label_template % key)

axes[0][0].set(title = r'Spezifische Winkelgeschwindigkeiten',
               xlabel = r'$\lambda$/m',
               ylabel = r'$\dot{\phi} \frac{rad}{sec}$')
axes[0][1].set(title = r'Spezifische Transversalgeschwindigkeiten',
               xlabel = r'$\lambda$/m',
               ylabel = r'$V/\frac{m}{sec}$')
axes[1][0].set(title = r'Entwicklung der Winkelgeschwindigkeit - $\mu=0.2, \lambda=6cm$',
               xlabel = r'x/m',
               ylabel = r'$\dot{\phi} / \frac{rad}{sec}$')
axes[1][1].set(title = r'Entwicklung der Transversalgeschwindigkeit - $\mu=0.2, \lambda=6cm$',
               xlabel = r'x/m',
               ylabel = r'$V/\frac{m}{sec}$')

# show legend
for ax in axes.ravel():
    ax.legend()

# save figure
plt.tight_layout()
plt.savefig('data.png', dpi=300)
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class DealiiParameterGui(CMakePackage):
    """A qt based graphical user interface for editing deal.II .prm parameter
    files."""

    homepage = "https://github.com/dealii/parameter_gui"
    git = "https://github.com/dealii/parameter_gui.git"
    # No tagged releases are packaged; track the upstream master branch.
    version('develop', branch='master')
    # The GUI is built on Qt widgets.
    depends_on('qt')
    def setup_environment(self, spack_env, run_env):
        # Expose the install prefix at run time so dependents/users can
        # locate the parameter_gui executable.
        run_env.set('PARAMETER_GUI_DIR', self.prefix)
|
import pulsar as psr
def load_ref_system():
    """Return dimethyl sulfoxide as found in the IQMol fragment library.

    All credit to https://github.com/nutjunkie/IQmol
    """
    geometry = """
      C 1.5407 -0.3295 0.0798
      S -0.0596 0.4296 -0.3610
      C -0.9467 -1.1596 -0.4976
      O -0.4670 0.6286 1.1081
      H 2.2637 0.4718 0.3373
      H 1.9354 -0.8892 -0.7935
      H 1.4455 -1.0221 0.9435
      H -0.5613 -1.7221 -1.3732
      H -2.0263 -0.9599 -0.6580
      H -0.8255 -1.7803 0.4161
      """
    return psr.make_system(geometry)
|
"""Example 6: second version of a submission is announced."""
from unittest import TestCase, mock
import tempfile
from datetime import datetime
from pytz import UTC
from flask import Flask
from ...services import classic
from ... import save, load, load_fast, domain, exceptions, core
CCO = 'http://creativecommons.org/publicdomain/zero/1.0/'
class TestSecondVersionIsAnnounced(TestCase):
"""Submitter creates a replacement, and it is announced."""
    @classmethod
    def setUpClass(cls):
        """Instantiate an app for use with a SQLite database."""
        # One throwaway on-disk SQLite file backs the classic database for the
        # whole test class; setUp re-creates the schema before each test.
        _, db = tempfile.mkstemp(suffix='.sqlite')
        cls.app = Flask('foo')
        cls.app.config['CLASSIC_DATABASE_URI'] = f'sqlite:///{db}'
        cls.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
        with cls.app.app_context():
            classic.init_app(cls.app)
    @mock.patch(f'{core.__name__}.StreamPublisher', mock.MagicMock())
    def setUp(self):
        """Create and publish two versions."""
        # v1 is created and announced, then a replacement (v2) is created
        # and announced, so every test starts from a two-version submission.
        self.submitter = domain.agent.User(1234, email='j.user@somewhere.edu',
                                           forename='Jane', surname='User',
                                           endorsements=['cs.DL', 'cs.IR'])
        self.defaults = {'creator': self.submitter}
        with self.app.app_context():
            classic.drop_all()
            classic.create_all()
            self.title = "the best title"
            self.doi = "10.01234/56789"
            self.category = "cs.DL"
            # Build a complete, finalizable v1 in a single save() call.
            self.submission, self.events = save(
                domain.event.CreateSubmission(**self.defaults),
                domain.event.ConfirmContactInformation(**self.defaults),
                domain.event.ConfirmAuthorship(**self.defaults),
                domain.event.ConfirmPolicy(**self.defaults),
                domain.event.SetTitle(title=self.title, **self.defaults),
                domain.event.SetLicense(license_uri=CCO,
                                        license_name="CC0 1.0",
                                        **self.defaults),
                domain.event.SetPrimaryClassification(category=self.category,
                                                      **self.defaults),
                domain.event.SetUploadPackage(checksum="a9s9k342900ks03330029",
                                              source_format=domain.submission.SubmissionContent.Format('tex'), identifier=123,
                                              uncompressed_size=593992,
                                              compressed_size=593992,
                                              **self.defaults),
                domain.event.SetAbstract(abstract="Very abstract " * 20,
                                         **self.defaults),
                domain.event.SetComments(comments="Fine indeed " * 10,
                                         **self.defaults),
                domain.event.SetJournalReference(journal_ref="Foo 1992",
                                                 **self.defaults),
                domain.event.SetDOI(doi=self.doi, **self.defaults),
                domain.event.SetAuthors(authors_display='Robert Paulson (FC)',
                                        **self.defaults),
                domain.event.FinalizeSubmission(**self.defaults)
            )
        # Announce the submission.
        # (Done by mutating the classic DB row directly, as the legacy
        # announcement process would.)
        self.paper_id = '1901.00123'
        with self.app.app_context():
            session = classic.current_session()
            db_row = session.query(classic.models.Submission).first()
            db_row.status = classic.models.Submission.ANNOUNCED
            dated = (datetime.now() - datetime.utcfromtimestamp(0))
            db_row.document = classic.models.Document(
                paper_id=self.paper_id,
                title=self.submission.metadata.title,
                authors=self.submission.metadata.authors_display,
                dated=dated.total_seconds(),
                primary_subject_class=self.category,
                created=datetime.now(UTC),
                submitter_email=self.submission.creator.email,
                submitter_id=self.submission.creator.native_id
            )
            db_row.doc_paper_id = self.paper_id
            session.add(db_row)
            session.commit()
        # Create the replacement (v2) with an updated title.
        with self.app.app_context():
            new_title = "A better title"
            self.submission, self.events = save(
                domain.event.CreateSubmissionVersion(**self.defaults),
                domain.event.ConfirmContactInformation(**self.defaults),
                domain.event.ConfirmAuthorship(**self.defaults),
                domain.event.SetLicense(license_uri=CCO,
                                        license_name="CC0 1.0",
                                        **self.defaults),
                domain.event.ConfirmPolicy(**self.defaults),
                domain.event.SetTitle(title=new_title, **self.defaults),
                domain.event.SetUploadPackage(checksum="a9s9k342900ks03330029",
                                              source_format=domain.submission.SubmissionContent.Format('tex'), identifier=123,
                                              uncompressed_size=593992,
                                              compressed_size=593992,
                                              **self.defaults),
                domain.event.FinalizeSubmission(**self.defaults),
                submission_id=self.submission.submission_id
            )
        # Announce second version.
        with self.app.app_context():
            session = classic.current_session()
            db_rows = session.query(classic.models.Submission) \
                .order_by(classic.models.Submission.submission_id.asc()) \
                .all()
            db_rows[1].status = classic.models.Submission.ANNOUNCED
            session.add(db_rows[1])
            session.commit()
    def tearDown(self):
        """Clear the database after each test."""
        # Drop all tables so state cannot leak between test methods.
        with self.app.app_context():
            classic.drop_all()
    @mock.patch(f'{core.__name__}.StreamPublisher', mock.MagicMock())
    def test_is_in_announced_state(self):
        """The submission is now in announced state."""
        # Check the submission state via the full (event-replay) loader.
        with self.app.app_context():
            submission, events = load(self.submission.submission_id)
            self.assertEqual(submission.status,
                             domain.submission.Submission.ANNOUNCED,
                             "The submission is in the publushed state")
            self.assertIsInstance(events[-1], domain.event.Announce,
                                  "An Announce event is inserted.")
            p_evts = [e for e in events if isinstance(e, domain.event.Announce)]
            self.assertEqual(len(p_evts), 2, "There are two publish events.")
            self.assertEqual(len(submission.versions), 2,
                             "There are two announced versions")
        # The projection-based loader must agree with the replay loader.
        with self.app.app_context():
            submission = load_fast(self.submission.submission_id)
            self.assertEqual(submission.status,
                             domain.submission.Submission.ANNOUNCED,
                             "The submission is in the publushed state")
            self.assertEqual(len(submission.versions), 2,
                             "There are two announced versions")
        # Check the database state.
        with self.app.app_context():
            session = classic.current_session()
            db_rows = session.query(classic.models.Submission) \
                .order_by(classic.models.Submission.submission_id.asc()) \
                .all()
            self.assertEqual(len(db_rows), 2,
                             "There are two rows in the submission table")
            self.assertEqual(db_rows[0].type,
                             classic.models.Submission.NEW_SUBMISSION,
                             "The first row has type 'new'")
            self.assertEqual(db_rows[0].status,
                             classic.models.Submission.ANNOUNCED,
                             "The first row is announced")
            self.assertEqual(db_rows[1].type,
                             classic.models.Submission.REPLACEMENT,
                             "The second row has type 'replacement'")
            self.assertEqual(db_rows[1].status,
                             classic.models.Submission.ANNOUNCED,
                             "The second row is in announced state")
    @mock.patch(f'{core.__name__}.StreamPublisher', mock.MagicMock())
    def test_can_replace_submission(self):
        """The submission can be replaced, resulting in a new version."""
        # Issue the replacement event against the announced submission.
        with self.app.app_context():
            submission, events = save(
                domain.event.CreateSubmissionVersion(**self.defaults),
                submission_id=self.submission.submission_id
            )
        # Check the submission state.
        with self.app.app_context():
            submission, events = load(self.submission.submission_id)
            self.assertEqual(submission.status,
                             domain.submission.Submission.WORKING,
                             "The submission is in the working state")
            self.assertEqual(submission.version, 3,
                             "The version number is incremented by 1")
            self.assertEqual(len(submission.versions), 2,
                             "There are two announced versions")
        # The projection-based fast load must agree with the event load.
        with self.app.app_context():
            submission = load_fast(self.submission.submission_id)
            self.assertEqual(submission.status,
                             domain.submission.Submission.WORKING,
                             "The submission is in the working state")
            self.assertEqual(submission.version, 3,
                             "The version number is incremented by 1")
            self.assertEqual(len(submission.versions), 2,
                             "There are two announced versions")
        # Check the database state.
        with self.app.app_context():
            session = classic.current_session()
            db_rows = session.query(classic.models.Submission) \
                .order_by(classic.models.Submission.submission_id.asc()) \
                .all()
            self.assertEqual(len(db_rows), 3,
                             "There are three rows in the submission table")
            self.assertEqual(db_rows[0].type,
                             classic.models.Submission.NEW_SUBMISSION,
                             "The first row has type 'new'")
            self.assertEqual(db_rows[0].status,
                             classic.models.Submission.ANNOUNCED,
                             "The first row is announced")
            self.assertEqual(db_rows[1].type,
                             classic.models.Submission.REPLACEMENT,
                             "The second row has type 'replacement'")
            self.assertEqual(db_rows[1].status,
                             classic.models.Submission.ANNOUNCED,
                             "The second row is in announced state")
            self.assertEqual(db_rows[2].type,
                             classic.models.Submission.REPLACEMENT,
                             "The third row has type 'replacement'")
            self.assertEqual(db_rows[2].status,
                             classic.models.Submission.NOT_SUBMITTED,
                             "The third row is in not submitted state")
    @mock.patch(f'{core.__name__}.StreamPublisher', mock.MagicMock())
    def test_can_withdraw_submission(self):
        """The submitter can request withdrawal of the submission."""
        withdrawal_reason = "the best reason"
        # A withdrawal is a *request*: the submission itself stays announced.
        with self.app.app_context():
            submission, events = save(
                domain.event.RequestWithdrawal(reason=withdrawal_reason,
                                               **self.defaults),
                submission_id=self.submission.submission_id
            )
        # Check the submission state.
        with self.app.app_context():
            submission, events = load(self.submission.submission_id)
            self.assertEqual(submission.status,
                             domain.submission.Submission.ANNOUNCED,
                             "The submission is announced.")
            self.assertTrue(submission.has_active_requests,
                            "The submission has an active request.")
            self.assertEqual(len(submission.pending_user_requests), 1,
                             "There is one pending user request.")
            self.assertIsInstance(submission.pending_user_requests[0],
                                  domain.submission.WithdrawalRequest)
            self.assertEqual(
                submission.pending_user_requests[0].reason_for_withdrawal,
                withdrawal_reason,
                "Withdrawal reason is set on request."
            )
            self.assertEqual(len(submission.versions), 2,
                             "There are two announced versions")
        # The projection-based fast load must agree with the event load.
        with self.app.app_context():
            submission = load_fast(self.submission.submission_id)
            self.assertEqual(submission.status,
                             domain.submission.Submission.ANNOUNCED,
                             "The submission is announced.")
            self.assertTrue(submission.has_active_requests,
                            "The submission has an active request.")
            self.assertEqual(len(submission.pending_user_requests), 1,
                             "There is one pending user request.")
            self.assertIsInstance(submission.pending_user_requests[0],
                                  domain.submission.WithdrawalRequest)
            self.assertEqual(
                submission.pending_user_requests[0].reason_for_withdrawal,
                withdrawal_reason,
                "Withdrawal reason is set on request."
            )
            self.assertEqual(len(submission.versions), 2,
                             "There are two announced versions")
        # Check the database state.
        with self.app.app_context():
            session = classic.current_session()
            db_rows = session.query(classic.models.Submission) \
                .order_by(classic.models.Submission.submission_id.asc()) \
                .all()
            self.assertEqual(len(db_rows), 3,
                             "There are three rows in the submission table")
            self.assertEqual(db_rows[0].type,
                             classic.models.Submission.NEW_SUBMISSION,
                             "The first row has type 'new'")
            self.assertEqual(db_rows[0].status,
                             classic.models.Submission.ANNOUNCED,
                             "The first row is announced")
            self.assertEqual(db_rows[1].type,
                             classic.models.Submission.REPLACEMENT,
                             "The second row has type 'replacement'")
            self.assertEqual(db_rows[1].status,
                             classic.models.Submission.ANNOUNCED,
                             "The second row is in announced state")
            self.assertEqual(db_rows[2].type,
                             classic.models.Submission.WITHDRAWAL,
                             "The third row has type 'withdrawal'")
            self.assertEqual(db_rows[2].status,
                             classic.models.Submission.PROCESSING_SUBMISSION,
                             "The third row is in the processing submission"
                             " state.")
    @mock.patch(f'{core.__name__}.StreamPublisher', mock.MagicMock())
    def test_cannot_edit_submission_metadata(self):
        """The submission metadata cannot be changed without a new version."""
        # SetTitle on an announced submission must be rejected outright...
        with self.app.app_context():
            with self.assertRaises(exceptions.InvalidEvent, msg=(
                    "Creating a SetTitle command results in an exception.")):
                save(domain.event.SetTitle(title="A better title",
                                           **self.defaults),
                     submission_id=self.submission.submission_id)
        # ...and the stored state must remain untouched.
        self.test_is_in_announced_state()
@mock.patch(f'{core.__name__}.StreamPublisher', mock.MagicMock())
def test_changing_doi(self):
"""Submitter can set the DOI."""
new_doi = "10.1000/182"
new_journal_ref = "Baz 1993"
new_report_num = "Report 82"
with self.app.app_context():
submission, events = save(
domain.event.SetDOI(doi=new_doi, **self.defaults),
submission_id=self.submission.submission_id
)
with self.app.app_context():
submission, events = save(
domain.event.SetJournalReference(journal_ref=new_journal_ref,
**self.defaults),
submission_id=self.submission.submission_id
)
with self.app.app_context():
submission, events = save(
domain.event.SetReportNumber(report_num=new_report_num,
**self.defaults),
submission_id=self.submission.submission_id
)
# Check the submission state.
with self.app.app_context():
submission, events = load(self.submission.submission_id)
self.assertEqual(submission.metadata.doi, new_doi,
"The DOI is updated.")
self.assertEqual(submission.metadata.journal_ref, new_journal_ref,
"The journal ref is updated.")
self.assertEqual(submission.metadata.report_num, new_report_num,
"The report number is updated.")
self.assertEqual(submission.status,
domain.submission.Submission.ANNOUNCED,
"The submission is in the submitted state.")
self.assertEqual(len(submission.versions), 2,
"There are two announced versions")
with self.app.app_context():
submission = load_fast(self.submission.submission_id)
self.assertEqual(submission.metadata.doi, new_doi,
"The DOI is updated.")
self.assertEqual(submission.metadata.journal_ref, new_journal_ref,
"The journal ref is updated.")
self.assertEqual(submission.metadata.report_num, new_report_num,
"The report number is updated.")
self.assertEqual(submission.status,
domain.submission.Submission.ANNOUNCED,
"The submission is in the submitted state.")
self.assertEqual(len(submission.versions), 2,
"There are two announced versions")
# Check the database state.
with self.app.app_context():
session = classic.current_session()
db_rows = session.query(classic.models.Submission) \
.order_by(classic.models.Submission.submission_id.asc()) \
.all()
self.assertEqual(len(db_rows), 3,
"There are three rows in the submission table")
self.assertEqual(db_rows[0].type,
classic.models.Submission.NEW_SUBMISSION,
"The first row has type 'new'")
self.assertEqual(db_rows[0].status,
classic.models.Submission.ANNOUNCED,
"The first row is announced")
self.assertEqual(db_rows[1].type,
classic.models.Submission.REPLACEMENT,
"The second row has type 'replacement'")
self.assertEqual(db_rows[1].status,
classic.models.Submission.ANNOUNCED,
"The second row is in announced state")
self.assertEqual(db_rows[2].type,
classic.models.Submission.JOURNAL_REFERENCE,
"The third row has type journal ref")
self.assertEqual(db_rows[2].status,
classic.models.Submission.PROCESSING_SUBMISSION,
"The third row is in the processing submission"
" state.")
self.assertEqual(db_rows[2].doi, new_doi,
"The DOI is updated in the database.")
self.assertEqual(db_rows[2].journal_ref, new_journal_ref,
"The journal ref is updated in the database.")
self.assertEqual(db_rows[2].report_num, new_report_num,
"The report number is updated in the database.")
    @mock.patch(f'{core.__name__}.StreamPublisher', mock.MagicMock())
    def test_cannot_be_unfinalized(self):
        """The submission cannot be unfinalized, because it is announced."""
        with self.app.app_context():
            with self.assertRaises(exceptions.InvalidEvent):
                save(domain.event.UnFinalizeSubmission(**self.defaults),
                     submission_id=self.submission.submission_id)
        # The stored state must remain exactly as announced.
        self.test_is_in_announced_state()
|
# Fix: replaced `from flask import *` with explicit imports of the names
# this module actually uses (wildcard imports hide dependencies and can
# shadow local names).
from flask import Blueprint, abort, redirect, render_template, request, session
from WSServer import server
import WSServer.games
# Blueprint carrying the site's main (non-websocket) pages.
main_pages = Blueprint('main_pages', __name__, template_folder='templates', static_folder='static')
@main_pages.route('/')
def lobby():
    """Render the lobby page; channel 1 is the default chat channel."""
    return render_template('base.html', channel_id=1)
@main_pages.route('/auth', methods=['POST', 'GET'])
def auth():
    """Log a user in: GET renders the form, POST checks the credentials."""
    # Already logged in -> back to the lobby.
    if session.get('user'):
        return redirect('')
    if request.method == 'GET':
        return render_template('auth.html')
    if request.method == 'POST':
        user = request.form.get('user')
        password = request.form.get('password')
        # NOTE(review): passwords are stored and compared in plaintext --
        # consider hashing (e.g. werkzeug.security) before production use.
        if (user in server.db.users) and (server.db.users[user]['password'] == password):
            user = server.db.users[user]
            session['user'] = user['user']
            session['user_id'] = user['user_id']
            session['user_rights'] = user['user_rights']
            return redirect('')
        return render_template('auth.html', error='Неверный логин или пароль!')
    # Unreachable with the methods declared above; kept as a guard.
    abort(418)
@main_pages.route('/reg', methods=['POST', 'GET'])
def reg():
    """Register a new user: GET renders the form, POST creates the account.

    Bug fix: the stored password was read from ``session['password']``,
    which is never set anywhere, so every successful registration raised a
    KeyError. The submitted ``password`` form value is stored instead.
    """
    # Already logged in -> back to the lobby.
    if session.get('user'):
        return redirect('')
    if request.method == 'GET':
        return render_template('reg.html')
    if request.method == 'POST':
        user = request.form.get('user')
        password = request.form.get('password')
        repassword = request.form.get('repassword')
        if (user not in server.db.users) and (password == repassword):
            # default=0 keeps registration working when the user table is empty.
            user_id = max([server.db.users[i]['user_id'] for i in server.db.users], default=0) + 1
            session['user'] = user
            session['user_id'] = user_id
            session['user_rights'] = 1
            server.db.users[session['user']] = {
                'user': session['user'],
                'password': password,  # was session['password'], which is never set
                'user_rights': 1,
                'user_id': user_id,
                # Per-game statistics start at [0, 0] for every game type.
                'user_stat': {i: [0, 0] for i in WSServer.games.game_types}
            }
            server.db.db_save_all()
            return redirect('')
        return render_template('reg.html', error='Такой пользователь уже существует, или пароли не совпадают!')
    # Unreachable with the methods declared above; kept as a guard.
    abort(418)
@main_pages.route('/user/<user_id>')
def user_data(user_id):
    """Render the personal page for the user with the given numeric id."""
    user_information = server.db.get_user_id_information(int(user_id))
    return render_template('pa.html', user_information=user_information)
@main_pages.route('/chat/<channel_id>')
def chat(channel_id):
    """Render the chat page for the given channel."""
    return render_template('chat.html', channel_id=channel_id)
|
# Example usage of the covid19uncle package: global and Thai COVID-19 data.
from covid19uncle import GlobalCovid19,ThaiCovid19
#Example:
# Worldwide statistics, keyed by lower-cased country name.
data = GlobalCovid19()
print(data['italy'])
print(data['italy']['total'])
print(data['italy']['new_cases'])
print(data['italy']['total_deaths'])
print(data['italy']['new_deaths'])
print(data['italy']['total_recoverd'])  # (sic) the library spells this key 'recoverd'
print(data['italy']['active_cases'])
print(data['italy']['serious_critical'])
print(data['italy']['totalcase_per1million'])
print(data['header']) #show header
print(data['total']) #show total
print(data['italy']['list']) #show list of Italy Information
# Thailand-specific statistics; the keys are Thai-language labels as
# provided by the library (updated, cumulative cases, new cases, etc.).
thai = ThaiCovid19()
print('อัพเดต:', thai['อัพเดต'])
print('ผู้ป่วยสะสม', thai['ผู้ป่วยสะสม'])
print('ผู้ป่วยรายใหม่', thai['ผู้ป่วยรายใหม่'])
print('ผู้ป่วยรุนแรง', thai['ผู้ป่วยรุนแรง'])
print('ผู้ป่วยเสียชีวิต', thai['ผู้ป่วยเสียชีวิต'])
print('ผู้ป่วยกลับบ้านแล้ว', thai['ผู้ป่วยกลับบ้านแล้ว'])
print('ผู้ป่วยเฝ้าระวังสะสม', thai['ผู้ป่วยเฝ้าระวังสะสม'])
print('ผู้ป่วยเฝ้าระวังรายใหม่', thai['ผู้ป่วยเฝ้าระวังรายใหม่'])
print('รักษาพยาบาลอยู่รพ', thai['รักษาพยาบาลอยู่รพ.'])
print('รักษาพยาบาลกลับบ้าน', thai['รักษาพยาบาลกลับบ้าน'])
print('รักษาพยาบาลสังเกตอาการ', thai['รักษาพยาบาลสังเกตอาการ'])
print('ผู้เดินทางที่คัดกรองสะสมจากสนามบิน', thai['ผู้เดินทางที่คัดกรองสะสมจากสนามบิน'])
print('ผู้เดินทางที่คัดกรองสะสมจากท่าเรือ', thai['ผู้เดินทางที่คัดกรองสะสมจากท่าเรือ'])
print('ผู้เดินทางที่คัดกรองสะสมจากด่านพรมแดน', thai['ผู้เดินทางที่คัดกรองสะสมจากด่านพรมแดน'])
print('ผู้เดินทางที่คัดกรองสะสมจากสตม.แจ้งวัฒนะ', thai['ผู้เดินทางที่คัดกรองสะสมจากสตม.แจ้งวัฒนะ'])
print('อ้างอิง', thai['อ้างอิง'])
'''
This file is built for adding the Objective Function.
The main function is objective_function(), to be called
from any other files such as GA and TS.
'''
def objective_function(chromosome):
    '''
    Evaluate a chromosome and store its fitness value.

    The active objective is the Booth function: two float genes x and y in
    the range -10..10, with optimum value 0 at x=1, y=3.  (Alternative
    objectives used in earlier experiments -- a Porosity model over GSS/OSS
    and the Goldstein-Price function -- can be substituted here; only this
    body needs to change.)
    '''
    gene_x = chromosome['gene'][0]
    gene_y = chromosome['gene'][1]
    # Booth: (x + 2y - 7)^2 + (2x + y - 5)^2
    first_term = (gene_x + 2 * gene_y - 7) ** 2
    second_term = (2 * gene_x + gene_y - 5) ** 2
    chromosome['fitness'] = first_term + second_term
    return chromosome
|
#!../../../env/bin/python
'''
This script will query all messages new as of yesterday and ensure
that they exist in the archive
'''
# Standalone broilerplate -------------------------------------------------------------
from django_setup import do_setup
do_setup()
# -------------------------------------------------------------------------------------
import argparse
import datetime
import os
from django.conf import settings
# from haystack.query import SearchQuerySet
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from mlarchive.archive.models import Message
import logging
logpath = os.path.join(settings.DATA_ROOT,'log/check_index24.log')
logging.basicConfig(filename=logpath,level=logging.DEBUG)
def main():
    """Verify that recently updated messages exist in the Elasticsearch index.

    Queries every Message updated within the last --age hours and checks that
    exactly one matching document exists in the index; with --fix, re-saves
    missing messages (presumably triggering re-indexing -- verify the save
    signal handlers).
    """
    parser = argparse.ArgumentParser(description='Check that messages are indexed')
    parser.add_argument('--age', type=int, default=24, help="Check messages this many hours old. Default is 24.")
    parser.add_argument('-f','--fix',help="perform fix",action='store_true')
    args = parser.parse_args()
    # Window: [now - age hours, now - 1 minute) -- the final minute is
    # excluded to give in-flight writes time to reach the index.
    now = datetime.datetime.now()
    start = now - datetime.timedelta(hours=args.age)
    end = now - datetime.timedelta(minutes=1)
    count = 0
    stat = {}
    client = Elasticsearch()
    messages = Message.objects.filter(updated__gte=start,updated__lt=end)
    for message in messages:
        # Exactly one hit per (msgid, list) pair is expected in the index.
        s = Search(using=client, index=settings.ELASTICSEARCH_INDEX_NAME)
        s = s.query('match', msgid=message.msgid)
        s = s.query('match', email_list=message.email_list.name)
        if s.count() != 1:
            print("Message not indexed. {list}: {msgid}".format(
                list=message.email_list,
                msgid=message.msgid))
            count = count + 1
            logging.warning(message.msgid + '\n')
            # Tally missing messages per mailing list for the summary below.
            stat[message.email_list.name] = stat.get(message.email_list.name,0) + 1
            if args.fix:
                message.save()
    print("Index Check {date}".format(date=start.strftime('%Y-%m-%d')))
    print("Checked {count}".format(count=messages.count()))
    print("Missing {count}".format(count=count))
    for k,v in list(stat.items()):
        print("{}:{}".format(k,v))
if __name__ == "__main__":
main()
|
from RequirementBaseClass import RequirementBaseClass
from Strategy.StrategyBaseClass import StrategyBaseClass
import StructuredMask
class FunctionCreator(RequirementBaseClass, StrategyBaseClass):
    """Requirement/strategy that turns text masks into callable functions."""
    # List of (mask string, resolver callable) pairs fed to StructuredMask.
    masks: list
    def __init__(self, function_name):
        RequirementBaseClass.__init__(self)
        # Mask syntax:
        # <Keyword>  an object placeholder (e.g. <Detail>)
        # []         optional part (e.g. [ein])
        # |          alternative (must also terminate the alternative,
        #            e.g. ein/eine/einer/)
        # {}         may repeat any number of times (e.g. {})
        self.masks = []
        # The mask text stays German: it is matched against user input at runtime.
        self.masks.append(("Schreibe {<Detail>} auf die Konsole", lambda library: " ".join(library["Detail"])))
        self.StructuredMaskInstance = StructuredMask.StructuredMask(self.masks)
        self.function_name = function_name
__all__ = ['flexible_distribute', 'FlexibleDistribute', 'to_admin']
import csv
import os
import threading
from BusinessCentralLayer.middleware.work_io import *
from BusinessLogicLayer.dog import subs2node
@logger.catch()
class FlexibleDistribute(object):
    """Data exchange -- flexible distribution of crawled subscription links."""
    def __init__(self, docker: tuple = None) -> None:
        """
        @param docker: (class_, {subscribe: end_life})
        """
        # If the container is fully populated, enqueue it immediately.
        # NOTE(review): the default docker=None makes all(docker) raise
        # TypeError; callers appear to always pass a tuple -- verify.
        if all(docker):
            Middleware.zeus.put_nowait(docker)
            self.key_name = REDIS_SECRET_KEY.format(docker[0])
    def to_mysql(self) -> None:
        ...
    def to_mongo(self) -> None:
        ...
    @staticmethod
    def to_redis() -> None:
        """Drain the zeus queue and flush the collected mappings to Redis."""
        # Bug fix: ``dict(zip(CRAWLER_SEQUENCE, [{}] * 2))`` bound every key
        # to the *same* dict object (so updates for one class leaked into all
        # of them) and only covered the first two classes.  Each class now
        # gets its own bucket.
        docker = {class_: {} for class_ in CRAWLER_SEQUENCE}
        # Pop queued tasks and merge each into its class bucket.
        while not Middleware.zeus.empty():
            alice = Middleware.zeus.get_nowait()
            docker[alice[0]].update(alice[-1])
        # Flush every bucket into its Redis hash.
        bob = RedisClient(db=4).get_driver()
        for xps in docker.items():
            bob.hset(name=xps[0], mapping=xps[-1])
    @staticmethod
    def to_nginx(class_, subscribe) -> None:
        """
        Write the subscription straight to the nginx-served file.
        @param class_: link type
        @param subscribe: a link of that type
        @return:
        """
        with open(NGINX_SUBSCRIBE.format(class_), 'w', encoding='utf-8') as f:
            f.write(subscribe)
# TODO: this module will be deprecated; a later release introduces a
# multiplexed-IO module with this logic wrapped in a class.
def flexible_distribute(subscribe, class_, life_cycle: str, driver_name=None):
    """
    Fan a freshly crawled subscription out to every persistence target.
    @param subscribe: the subscription link itself
    @param class_: the link's type/category
    @param life_cycle: expiry information stored alongside the link
    @param driver_name: name of the crawler/driver that produced the link
    @return:
    """
    # data --> Database(Mysql)
    # data --> Database(MongoDB)
    # data --> Redis (written asynchronously on a worker thread)
    threading.Thread(target=RedisClient().add, args=(REDIS_SECRET_KEY.format(class_), subscribe, life_cycle)).start()
    # data --> csv
    with open(SERVER_PATH_DATABASE_FETCH, 'a', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        # Row layout: insertion time, driver name, subscription, type.
        now_ = str(datetime.now(TIME_ZONE_CN)).split('.')[0]
        writer.writerow([f'{now_}', f"{driver_name}", f'{subscribe}', class_])
    # data --> <Nginx> if linux or <Cache>
    try:
        with open(NGINX_SUBSCRIBE.format(class_), 'w', encoding='utf-8') as f:
            f.write(subscribe)
    except FileNotFoundError as e:
        print(e)
def to_admin(class_):
    """Hand one cached link of the requested type to an admin caller.

    Pops a link from the admin cache, resolves its node info, schedules a
    background refill of the cache, and always returns a response dict
    (just {'msg': 'failed'} on any error or unknown class).
    """
    response = {'msg': 'failed'}
    # Fetch a link of the requested type.
    if class_ in CRAWLER_SEQUENCE:
        try:
            logger.debug("管理员模式--点取链接")
            subs = markup_admin_element(class_)
            if subs:
                node_info: dict = subs2node(os.path.join(SERVER_DIR_DATABASE_CACHE, 'subs2node.txt'), subs)
                logger.success('管理员模式--链接分发成功')
                response.update(
                    {'msg': 'success', 'subscribe': node_info.get('subs'), 'nodeInfo': node_info.get('node'),
                     'subsType': class_})
                logger.info('管理员模式--尝试补充链接池')
                # Refill the admin element pool in the background.
                threading.Thread(target=step_admin_element, kwargs={"class_": class_}).start()
                logger.success('管理员模式--补充成功')
        except Exception as e:
            logger.exception(e)
        finally:
            # NOTE(review): `return` inside `finally` suppresses any in-flight
            # exception; here that appears deliberate (best-effort response,
            # errors already logged above) -- confirm before changing.
            return response
|
MOD = 1000000
REM = 1
# REM = product (mod MOD) of all integers in [3, MOD) coprime to 10,
# i.e. one full "block" of odd, non-multiple-of-5 residues.
for i in range(3, MOD):
    if i % 2 != 0 and i % 5 != 0:
        REM = REM * i % MOD
def count_factor(n, f):
    """Return the exponent of f in n! (Legendre's formula: sum of n // f^k)."""
    ret = 0
    div = f
    while n >= div:
        # Fix: explicit floor division -- `n / div` relied on Python 2
        # integer division and returns a float under Python 3.
        ret += n // div
        div *= f
    return ret
def even_factorize(n):
    """Contribution of the even numbers <= n to n! mod MOD, sans factors of 2.

    The even numbers are 2*1, 2*2, ..., 2*(n//2); with the powers of two
    handled separately in solve(), their product reduces to (n//2)!.
    """
    if n == 0:
        return 1
    else:
        # Fix: explicit floor division (Python 2's `/` on ints).
        return factorize(n // 2)
def odd_factorize_coprime(n):
    """Product mod MOD of the numbers <= n that are coprime to 10.

    Every full block of MOD consecutive integers contributes the precomputed
    constant REM; only the final partial block is multiplied out explicitly.
    """
    # Fix: explicit floor division (Python 2's `/` on ints).
    ret = pow(REM, n // MOD, MOD)
    n %= MOD
    for i in range(3, n + 1):
        if i % 2 != 0 and i % 5 != 0:
            ret = ret * i % MOD
    return ret
def odd_factorize(n):
    """Product mod MOD of all odd numbers <= n, with factors of 5 removed.

    The multiples of 5 among the odd numbers are 5 * (odd numbers <= n//5),
    hence the recursion on n // 5.
    """
    if n == 0:
        return 1
    else:
        # Fix: explicit floor division (Python 2's `/` on ints).
        return odd_factorize(n // 5) * odd_factorize_coprime(n) % MOD
def factorize(n):
    # n! mod MOD with the factors of 2 and 5 stripped out
    # (the surplus twos are reattached in solve()).
    return even_factorize(n) * odd_factorize(n) % MOD
def solve(n):
    # Reattach the surplus factors of 2 -- those not paired with a 5 to form
    # a trailing zero -- to the 2-and-5-free factorial, all mod MOD.
    twos = count_factor(n, 2) - count_factor(n, 5)
    return factorize(n) * pow(2, twos, MOD) % MOD
if __name__ == "__main__":
    # Fix: print() with parentheses works on both Python 2 and Python 3;
    # the bare print statement was a SyntaxError under Python 3.
    print(solve(10 ** 12))
|
import sys
import numpy as np
from numpy.ctypeslib import ndpointer, load_library
from numpy.testing import *
try:
cdll = load_library('multiarray', np.core.multiarray.__file__)
_HAS_CTYPE = True
except ImportError:
_HAS_CTYPE = False
class TestLoadLibrary(TestCase):
    """Tests for numpy.ctypeslib.load_library (Python 2 era syntax)."""
    @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation")
    @dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin")
    def test_basic(self):
        # Loading the multiarray extension by bare name should succeed.
        try:
            cdll = load_library('multiarray',
                                np.core.multiarray.__file__)
        except ImportError, e:
            msg = "ctypes is not available on this python: skipping the test" \
                  " (import error was: %s)" % str(e)
            print msg
    @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation")
    @dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin")
    def test_basic2(self):
        """Regression for #801: load_library with a full library name
        (including extension) does not work."""
        try:
            try:
                # 'SO' is the platform's shared-object suffix (e.g. '.so').
                from distutils import sysconfig
                so = sysconfig.get_config_var('SO')
                cdll = load_library('multiarray%s' % so,
                                    np.core.multiarray.__file__)
            except ImportError:
                print "No distutils available, skipping test."
        except ImportError, e:
            msg = "ctypes is not available on this python: skipping the test" \
                  " (import error was: %s)" % str(e)
            print msg
class TestNdpointer(TestCase):
    """Tests for numpy.ctypeslib.ndpointer argument validation."""
    def test_dtype(self):
        # Accepts a type object, a dtype string, and a dtype instance.
        dt = np.intc
        p = ndpointer(dtype=dt)
        self.assert_(p.from_param(np.array([1], dt)))
        dt = '<i4'
        p = ndpointer(dtype=dt)
        self.assert_(p.from_param(np.array([1], dt)))
        dt = np.dtype('>i4')
        p = ndpointer(dtype=dt)
        p.from_param(np.array([1], dt))
        # An array with swapped byte order must be rejected.
        self.assertRaises(TypeError, p.from_param,
                          np.array([1], dt.newbyteorder('swap')))
        # Structured dtypes: equal descriptors match, aligned ones may not.
        dtnames = ['x', 'y']
        dtformats = [np.intc, np.float64]
        dtdescr = {'names' : dtnames, 'formats' : dtformats}
        dt = np.dtype(dtdescr)
        p = ndpointer(dtype=dt)
        self.assert_(p.from_param(np.zeros((10,), dt)))
        samedt = np.dtype(dtdescr)
        p = ndpointer(dtype=samedt)
        self.assert_(p.from_param(np.zeros((10,), dt)))
        dt2 = np.dtype(dtdescr, align=True)
        if dt.itemsize != dt2.itemsize:
            self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2))
        else:
            self.assert_(p.from_param(np.zeros((10,), dt2)))
    def test_ndim(self):
        # ndim must match the array's dimensionality exactly.
        p = ndpointer(ndim=0)
        self.assert_(p.from_param(np.array(1)))
        self.assertRaises(TypeError, p.from_param, np.array([1]))
        p = ndpointer(ndim=1)
        self.assertRaises(TypeError, p.from_param, np.array(1))
        self.assert_(p.from_param(np.array([1])))
        p = ndpointer(ndim=2)
        self.assert_(p.from_param(np.array([[1]])))
    def test_shape(self):
        # shape must match element-for-element; () accepts a 0-d array.
        p = ndpointer(shape=(1,2))
        self.assert_(p.from_param(np.array([[1,2]])))
        self.assertRaises(TypeError, p.from_param, np.array([[1],[2]]))
        p = ndpointer(shape=())
        self.assert_(p.from_param(np.array(1)))
    def test_flags(self):
        # flags can be given by name or by the raw flags number.
        x = np.array([[1,2,3]], order='F')
        p = ndpointer(flags='FORTRAN')
        self.assert_(p.from_param(x))
        p = ndpointer(flags='CONTIGUOUS')
        self.assertRaises(TypeError, p.from_param, x)
        p = ndpointer(flags=x.flags.num)
        self.assert_(p.from_param(x))
        self.assertRaises(TypeError, p.from_param, np.array([[1,2,3]]))
# gettotalrefcount only exists on --with-pydebug builds of CPython.
if hasattr(sys, 'gettotalrefcount'):
    # skip this test class when Python was compiled using
    # the --with-pydebug option. This is necessary because, i.e.
    # type("foo", (object,), {})
    # leaks references
    del TestNdpointer
if __name__ == "__main__":
    run_module_suite()
|
# TODO - put all constants here
|
# Cálculos Genéricos.
def sumar(n1, n2):
    """Print the sum of n1 and n2 in Spanish."""
    resultado = n1 + n2
    print(f"El resultado de la suma de {n1} más {n2} es: {resultado}")
def restar(n1, n2):
    """Print the difference n1 - n2 in Spanish."""
    resultado = n1 - n2
    print(f"El resultado de la resta de {n1} menos {n2} es: {resultado}")
def producto(n1, n2):
    """Print the product n1 * n2 in Spanish."""
    resultado = n1 * n2
    print(f"El resultado del producto de {n1} por {n2} es: {resultado}")
def divis(n1, n2):
    """Print the quotient n1 / n2 in Spanish (raises ZeroDivisionError if n2 == 0)."""
    resultado = n1 / n2
    print(f"El resultado de la división de {n1} entre {n2} es: {resultado}")
def potencia(n1, n2):
    """Print n1 raised to the power n2 in Spanish."""
    resultado = n1 ** n2
    print(f"El resultado de la potencia de {n1} elevado a {n2} es: {resultado}")
def redond(n1):
    """Print n1 rounded to the nearest integer (banker's rounding) in Spanish."""
    resultado = round(n1)
    print(f"El valor redondeado de {n1} es: {resultado}")
|
import pygame
import game_objects
import consts
import color
import animations
from typing import Tuple, List
import random
import status
import effect
class Excepties(game_objects.GameObject):
    """A drifting 'exception' obstacle.

    Spawns with a random Python-exception sprite at a random height and
    drifts toward the right screen edge; escaping off-screen costs the
    player one health point.
    """
    def __init__(self, status_: status.Status) -> None:
        super().__init__()
        # Pick one of the exception sprites at random.
        self._animation = animations.Error(random.choice([
            animations.Surfaces.FLOATINGPOINTERROR,
            animations.Surfaces.INDEXERROR,
            animations.Surfaces.KEYERROR,
            animations.Surfaces.MEMORYERROR,
            animations.Surfaces.NOTIMPLEMENTEDERROR,
            animations.Surfaces.OVERFLOWERROR,
            animations.Surfaces.RECURSIONERROR,
            animations.Surfaces.RUNTIMEERROR,
            animations.Surfaces.TYPEERROR]))
        # Random vertical spawn position, kept inside the playfield margins.
        self._y = random.randint(30, consts.SCREEN_H - 140)
        self._status = status_
        self._width = self._animation.surface.get_width()
        # Speed tier scales with level, clamped to the last configured tier;
        # actual speed is randomized within [offset, offset + range_).
        offset, range_ = consts.EXCEPTION_SPEEDS[min(self._status.level, len(consts.EXCEPTION_SPEEDS) - 1)]
        self._speed = offset + random.random() * range_
    def render(self, target) -> None:
        """Draw the current animation frame at this object's position."""
        self._animation.render(target, self._x, self._y)
    def update(self, delta) -> None:
        """Advance position and animation; leaving the right edge costs health."""
        self._x += (delta * self._speed)
        self._animation.update(delta)
        if (self._x + self._width) > consts.SCREEN_W:
            self.delete()
            self._status.dec_health()
            # Spawn a feedback effect at this position; (300, 10) is
            # presumably a duration/size parameter -- confirm Effect's API.
            effect.Effect(self.pos, (300, 10))
    def _get_surface(self) -> pygame.Surface:
        # Exposes the current sprite surface to the GameObject base class.
        return self._animation.surface
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os.path
from echomesh.base import Name
from echomesh.base import Path
from echomesh.base import Platform
from echomesh.base import Yaml
COMMAND_PATH = None
COMMAND_PATH_NAMES = None
def clean(*path):
    """Join the path components, then split the result back on '/'."""
    joined = os.path.join(*path)
    return joined.split('/')
def _command_file(*path):
    """Map a command path to a real file.

    'default/...' paths live inside the code tree's config directory;
    everything else resolves under the project's 'command' directory.
    """
    path = clean(*path)
    if path[0] == 'default':
        return os.path.join(Path.CODE_PATH, 'echomesh', 'config', *path[1:])
    else:
        return os.path.join(Path.PROJECT_PATH, 'command', *path)
COMMAND_PATH = None  # NOTE(review): redundant -- already initialized above.
def compute_command_path():
    """Populate COMMAND_PATH and COMMAND_PATH_NAMES from name/tags/platform.

    Entries are ordered most-specific first (instance name, then tags,
    then platform, then 'master', then the shipped defaults).
    """
    global COMMAND_PATH, COMMAND_PATH_NAMES
    COMMAND_PATH = (['name/' + Name.NAME] +
                    [('tag/' + t) for t in Name.TAGS] +
                    ['platform/' + Platform.PLATFORM,
                     'master',
                     _command_file('default/platform/%s' % Platform.PLATFORM),
                     _command_file('default')])
    # Parallel list of human-readable names for each COMMAND_PATH entry.
    COMMAND_PATH_NAMES = (['name'] +  # TODO: fix?
                          [('tag/' + t) for t in Name.TAGS] +
                          ['platform/' + Platform.PLATFORM,
                           'master',
                           'default/platform/%s' % Platform.PLATFORM,
                           'default'])
compute_command_path()
def named_paths():
    """Pair each logical path name with its concrete COMMAND_PATH entry."""
    return zip(COMMAND_PATH_NAMES, COMMAND_PATH)
def expand(*path):
    """Return candidate 'command/...' paths for every COMMAND_PATH entry."""
    # clean() re-splits on '/' so callers may pass slash-joined components
    # (and so the separator is normalized on Windows).
    path = clean(*path)
    return [os.path.join('command', i, *path) for i in COMMAND_PATH]
def resolve(*path):
    """Return the first candidate path with a usable Yaml file, or None.

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    it is narrowed to ``except Exception``, and the no-match fall-through
    now returns None explicitly.
    """
    x = expand(*path)
    for f in x:
        try:
            return Yaml.filename(f)
        except Exception:
            continue
    return None
def load_resolve(*path):
    """Resolve *path* and read its Yaml; raise if nothing readable is found."""
    f = resolve(*path)
    if f:
        data = Yaml.read(f)
        if data:
            return f, data
    raise Exception("Couldn't read Yaml from file %s" % os.path.join(*path))
def load(*path):
    """Read only the Yaml data, discarding the resolved filename."""
    return load_resolve(*path)[1]
def base_file(*path):
    """Return the path of *path* inside the 'master' command scope."""
    return _command_file('master', *path)
def config_file(scope='default'):
    """Return the config.yml path for the given scope."""
    return _command_file(scope, 'config.yml')
def read_config(scope='default'):
    """Read and return the parsed config.yml for the given scope."""
    return Yaml.read(config_file(scope))
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Driver for determining which type of servo is being used."""
import logging
import os
import hw_driver
import servo.servo_logging
# Raised when a metadata control is queried with bad/missing parameters.
class metadataError(hw_driver.HwDriverError):
  """Error class for metadata information."""
class servoMetadata(hw_driver.HwDriver):
  """Driver exposing metadata about the running servod instance."""
  def __init__(self, interface, params):
    """Initializes the ServoType driver.
    Args:
      interface: A driver interface object. This is the servod interface.
      params: A dictionary of parameters, but is ignored.
    """
    super(servoMetadata, self).__init__(interface, params)
  def _Get_type(self):
    """Gets the current servo type."""
    return self._interface._version
  def _Get_pid(self):
    """Return servod instance pid"""
    return os.getpid()
  def _Get_serial(self):
    """Gets the current servo serial."""
    return self._interface.get_serial_number(self._interface.MAIN_SERIAL)
  def _Get_config_files(self):
    """Gets the configuration files used for this servo server invocation"""
    xml_files = self._interface._syscfg._loaded_xml_files
    # See system_config.py for schema, but entry[0] is the file name
    return [entry[0] for entry in xml_files]
  def _Get_tagged_controls(self):
    """Retrieve all controls under a certain tag.

    Raises:
      metadataError: if no 'tag' was supplied in params.
    """
    if 'tag' not in self._params:
      raise metadataError('tag needs to be specified in params.')
    return self._interface._syscfg.get_controls_for_tag(self._params['tag'])
  def _Set_rotate_logs(self, _):
    """Force a servo log rotation."""
    handlers = [h for h in logging.getLogger().handlers if
                isinstance(h, servo.servo_logging.ServodRotatingFileHandler)]
    self._logger.info('Rotating out the log file per user request.')
    if not handlers:
      self._logger.warn('No ServodRotatingFileHandlers on this instance. noop.')
    for h in handlers:
      h.doRollover()
  def _Get_servod_logs_active(self):
    """Return whether servod file logging is turned on."""
    for h in logging.getLogger().handlers:
      if isinstance(h, servo.servo_logging.ServodRotatingFileHandler):
        # Automatically converted to the 'yes/no' by servod.
        return 1
    return 0
  def _Set_log_msg(self, msg):
    """Log |msg| into info."""
    self._logger.info('%s', msg)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: close_orphaned_testsets.py $
# pylint: disable=C0301
"""
Maintenance tool for closing orphaned testsets.
"""
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 118412 $"
# Standard python imports
import sys
import os
from optparse import OptionParser
# Add Test Manager's modules path
g_ksTestManagerDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(g_ksTestManagerDir)
# Test Manager imports
from testmanager.core.db import TMDatabaseConnection
from testmanager.core.testset import TestSetLogic;
class CloseOrphanedTestSets(object):
    """
    Finds and closes orphaned testsets.
    """
    def __init__(self):
        """
        Parse command line
        """
        # Without --just-do-it this tool only reports; it changes nothing.
        oParser = OptionParser();
        oParser.add_option('-d', '--just-do-it', dest='fJustDoIt', action='store_true',
                           help='Do the database changes.');
        (self.oConfig, _) = oParser.parse_args();
    def main(self):
        """ Main method. Returns 0 (also on 'nothing to do'). """
        oDb = TMDatabaseConnection();
        # Get a list of orphans.
        oLogic = TestSetLogic(oDb);
        aoOrphans = oLogic.fetchOrphaned();
        if aoOrphans:
            # Complete them.
            if self.oConfig.fJustDoIt:
                print 'Completing %u test sets as abandoned:' % (len(aoOrphans),);
                for oTestSet in aoOrphans:
                    print '#%-7u: idTestBox=%-3u tsCreated=%s tsDone=%s' \
                          % (oTestSet.idTestSet, oTestSet.idTestBox, oTestSet.tsCreated, oTestSet.tsDone);
                    oLogic.completeAsAbandoned(oTestSet.idTestSet);
                print 'Committing...';
                oDb.commit();
            else:
                # Dry run: just list what would have been completed.
                for oTestSet in aoOrphans:
                    print '#%-7u: idTestBox=%-3u tsCreated=%s tsDone=%s' \
                          % (oTestSet.idTestSet, oTestSet.idTestBox, oTestSet.tsCreated, oTestSet.tsDone);
                print 'Not completing any testsets without seeing the --just-do-it option.'
        else:
            print 'No orphaned test sets.\n'
        return 0;
if __name__ == '__main__':
sys.exit(CloseOrphanedTestSets().main())
|
from symbolicExpressions import *
import math
debug = False


def make_env(lst_of_identifiers, test_pt):
    """Build an evaluation environment mapping each identifier to the
    corresponding value of the test point (extras are ignored, as zip
    truncates to the shorter sequence)."""
    return dict(zip(lst_of_identifiers, test_pt))
def checkFunctionValidity(fun_expr, lst_of_identifiers, test_point_list):
    """Return True iff fun_expr evaluates without error at every test point.

    :param fun_expr: symbolic expression exposing ``eval(env)``
    :param lst_of_identifiers: variable names, positionally matching each test point
    :param test_point_list: iterable of value tuples to try
    """
    for test_pt in test_point_list:
        env = make_env(lst_of_identifiers, test_pt)
        try:
            fun_expr.eval(env)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate; any evaluation error marks the expr invalid.
            if debug:
                print(f'Failed expression {fun_expr}')
            return False
    return True
def is_viable_expr(fun_expr, lst_of_identifiers, params):
    # Thin wrapper: an expression is "viable" when it evaluates cleanly at
    # every point in params.test_points (see checkFunctionValidity).
    return checkFunctionValidity(fun_expr, lst_of_identifiers, params.test_points)
def compute_fitness(fun_expr, lst_of_identifiers, params):
    """Return the regression fitness of fun_expr: the negative sum of squared
    errors over params.regression_training_data.

    Higher (closer to 0) is better.  If the expression fails to evaluate at
    any training point, -inf is returned immediately.

    :param fun_expr: symbolic expression exposing ``eval(env)``
    :param lst_of_identifiers: variable names, positionally matching each test point
    :param params: object providing ``regression_training_data`` as an
        iterable of (test_point, target_value) pairs
    """
    regression_training_data = params.regression_training_data
    fitness = 0.0
    for (test_pt, y) in regression_training_data:
        env = make_env(lst_of_identifiers, test_pt)
        try:
            yHat = fun_expr.eval(env)
            fitness = fitness - (yHat - y) ** 2
        except Exception:
            # Narrowed from a bare 'except:'; an unevaluable expression gets
            # the worst possible fitness.
            if debug:
                print(f'Warning: Expression evaluation failed: {fun_expr} @ {test_pt}')
            return -float('inf')
    return fitness
|
import re
import itertools as it
import numpy as np
from seasalt import create_feature_names
def transformer_custom_settings(global_dict, X, y=None, fit=True, names=True):
    """Apply every transformer stored under a 'transNN' key of global_dict
    to X and stack the outputs column-wise.

    For each matching key 'transNN' the companion settings are looked up
    under 'metaNN'.  When `fit` is true the transformer is fitted on (X, y)
    first; when `names` is true, feature names are generated from the
    'feature_names_prefix' entry of the meta dict.

    Returns a tuple (stacked array, flat list of feature names).
    """
    trans_pattern = re.compile(r"trans\d+")
    blocks = []
    name_groups = []
    for key in [k for k in global_dict if trans_pattern.match(k)]:
        suffix = key[5:]
        meta_key = "meta" + suffix
        if suffix.isdigit():
            print(suffix, key, meta_key)
        transformer = global_dict[key]
        if fit:
            transformer.fit(X, y)
        transformed = transformer.transform(X)
        blocks.append(transformed)
        if names:
            prefix = global_dict[meta_key]['feature_names_prefix']
            name_groups.append(create_feature_names(prefix, transformed.shape[1]))
    return np.concatenate(blocks, axis=1), list(it.chain.from_iterable(name_groups))
|
"""
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
File: babygraphics.py
Name: Jade Yeh
This file draws a graphic of the name_data.
The user provides names, and the program draws a line chart of year versus rank for each requested name.
"""
import tkinter
import babynames
import babygraphicsgui as gui
# Decade snapshots of the national baby-name ranking files, 1900-2010.
FILENAMES = [
    'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
    'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
    'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
    'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
    'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
    'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000      # Width of the canvas.
CANVAS_HEIGHT = 600      # Height of the canvas.
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]  # Year list for all files in database.
GRAPH_MARGIN_SIZE = 20   # The edge size for the canvas.
COLORS = ['red', 'purple', 'green', 'blue', 'orange']  # Color list for coloring texts and lines (cycled per name).
TEXT_DX = 2              # Text distance from a vertical line.
LINE_WIDTH = 2           # The width size of a line.
MAX_RANK = 1000          # The max rank in the files; larger ranks plot on the bottom frame line.
def get_x_coordinate(width, year_index):
    """
    Given the width of the canvas and the index of the current year
    in the YEARS list, returns the x coordinate of the vertical
    line associated with that year.
    Input:
        width (int): The width of the canvas
        year_index (int): The index of the current year in the YEARS list
    Returns:
        x_coordinate (float): The x coordinate of the vertical line associated
                              with the specified year.
    """
    # Split the drawable width (inside both margins) into one column per year.
    column_width = (width - GRAPH_MARGIN_SIZE * 2) / len(YEARS)
    return GRAPH_MARGIN_SIZE + column_width * year_index
def draw_fixed_lines(canvas):
    """
    Erases all existing information on the given canvas and then
    draws the fixed background lines on it.
    Input:
        canvas (Tkinter Canvas): The canvas on which we are drawing.
    Returns:
        This function does not return any value.
    """
    canvas.delete('all')                 # delete all existing lines from the canvas

    top_y = GRAPH_MARGIN_SIZE
    bottom_y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
    right_x = CANVAS_WIDTH - GRAPH_MARGIN_SIZE
    # Top and bottom frame lines of the chart area.
    canvas.create_line(GRAPH_MARGIN_SIZE, top_y, right_x, top_y, width=LINE_WIDTH)
    canvas.create_line(GRAPH_MARGIN_SIZE, bottom_y, right_x, bottom_y, width=LINE_WIDTH)
    # One full-height vertical line plus a year label for each entry in YEARS.
    for year_index, year in enumerate(YEARS):
        x = get_x_coordinate(width=CANVAS_WIDTH, year_index=year_index)
        canvas.create_line(x, 0, x, CANVAS_HEIGHT, width=LINE_WIDTH)
        canvas.create_text(x + TEXT_DX, bottom_y, text=year, anchor=tkinter.NW)
def draw_names(canvas, name_data, lookup_names):
    """
    Given a dict of baby name data and a list of name, plots
    the historical trend of those names onto the canvas.
    Input:
        canvas (Tkinter Canvas): The canvas on which we are drawing.
        name_data (dict): Dictionary holding baby name data
        lookup_names (List[str]): A list of names whose data you want to plot
    Returns:
        This function does not return any value.
    """
    draw_fixed_lines(canvas)        # draw the fixed background grid

    # Write your code below this line
    #################################
    color_num = 0                   # index into COLORS, advanced once per name
    # Look up each name for the year&rank data that the user wants to view.
    for name in lookup_names:
        # 1st for-loop creates a list of rank by year order.
        rank_lst = []
        for i in range(len(YEARS)):
            # Look up data in dict{year: rank} by year in string format.
            if str(YEARS[i]) not in name_data[name]:
                # Assign a rank as '9999' for a non-existed name in the database for a certain year
                # (anything > MAX_RANK pins the point to the bottom frame line).
                rk = '9999'
            else:
                rk = name_data[name][str(YEARS[i])]
            # List the rank in an integer format.
            rank_lst.append(int(rk))
        # 2nd for-loop creates a list of x- and y-coordinate by year order
        # (flat list: [x0, y0, x1, y1, ...]), drawing the labels as it goes.
        x_y_lst = []
        for j in range(len(YEARS)):
            x = get_x_coordinate(width=CANVAS_WIDTH, year_index=j)
            y = get_y_coordinate(height=CANVAS_HEIGHT, rank=rank_lst[j])
            x_y_lst.append(x)
            x_y_lst.append(y)
            # Show '*' as the rank value for the name with actual rank over the MAX_RANK constant.
            if rank_lst[j] > MAX_RANK:
                canvas.create_text(x + TEXT_DX, y, text=str(name) + '*', anchor=tkinter.SW,
                                   fill=COLORS[color_num % len(COLORS)])
            # Show the name with its rank value.
            else:
                canvas.create_text(x+TEXT_DX, y, text=str(name)+str(rank_lst[j]), anchor=tkinter.SW,
                                   fill=COLORS[color_num % len(COLORS)])
        # 3rd for-loop draws the trend lines, connecting consecutive
        # (x, y) pairs from the flat coordinate list.
        for n in range(len(YEARS)-1):
            canvas.create_line(x_y_lst[n * 2], x_y_lst[n * 2 + 1], x_y_lst[n * 2 + 2],
                               x_y_lst[n * 2 + 3], width=LINE_WIDTH, fill=COLORS[color_num % len(COLORS)])
        # Change color for the next name data.
        color_num += 1
def get_y_coordinate(height, rank):
    """
    Given the height of the canvas and the rank of a name in a given year,
    returns the y coordinate of one end of the trend line associated with
    that rank.
    Input:
        height (int): The height of the canvas
        rank (int): The rank associated with a certain name in a certain year
    Returns:
        y_coordinate (float): The y coordinate of one end of the trend line
                              associated with the rank of the current year.
    """
    if rank > MAX_RANK:
        # Off-chart ranks sit on the bottom frame line.
        return height - GRAPH_MARGIN_SIZE
    # One rank unit of vertical space; rank 0 maps to the top frame line.
    unit = (height - GRAPH_MARGIN_SIZE * 2) / MAX_RANK
    return unit * rank + GRAPH_MARGIN_SIZE
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
    """Entry point: load the name data, build the GUI, and run the event loop."""
    # Load data
    name_data = babynames.read_files(FILENAMES)

    # Create the window and the canvas
    top = tkinter.Tk()
    top.wm_title('Baby Names')
    canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)

    # Call draw_fixed_lines() once at startup so we have the lines
    # even before the user types anything.
    draw_fixed_lines(canvas)

    # This line starts the graphical loop that is responsible for
    # processing user interactions and plotting data
    top.mainloop()


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# Using Arista's pyeapi, create a script that allows you to add a VLAN (both the VLAN ID and the VLAN name).
# Your script should first check that the VLAN ID is available and only add the VLAN if it doesn't already exist.
# Use VLAN IDs between 100 and 999. You should be able to call the script from the command line as follows:
#
# python eapi_vlan.py --name blue 100 # add VLAN100, name blue
#
# If you call the script with the --remove option, the VLAN will be removed.
#
# python eapi_vlan.py --remove 100 # remove VLAN100
#
# Once again only remove the VLAN if it exists on the switch.
# You will probably want to use Python's argparse to accomplish the argument processing.
#
# In the lab environment, if you want to directly execute your script,
# then you will need to use '#!/usr/bin/env python' at the top of the script (instead of '!#/usr/bin/python').
#
# ~/.eapi.conf
# [connection:pynet-sw1]
# username: eapi
# password: 99saturday
# host: 50.76.53.27
# port: 8243
# transport: https
import pyeapi
import argparse
from pprint import pprint
parser = argparse.ArgumentParser(description='Script configure vlan id and vlan name in Arista switch')
# Add arguments
parser.add_argument(
'-n', '--name', type=str, help='Vlan name', action='store', dest='vlan_name')
parser.add_argument(
'-v', '--vlan', type=str, help='Vlan number', action='store', dest='vlan_id')
parser.add_argument(
'-d', '--remove', type=str, help='Delete Vlan', action='store', dest='delete_vlan')
results = parser.parse_args()
var_vlan_name = results.vlan_name
var_vlan_id = results.vlan_id
var_del_vlan = results.delete_vlan
if var_del_vlan != None:
delete_vlan = 'no vlan %s' % results.delete_vlan
pynet_sw1 = pyeapi.connect_to("pynet-sw1")
vlan_command = pynet_sw1.enable("show vlan")
vlan_dict = vlan_command[0]
vlans = vlan_dict['result']
vlan_list = vlans['vlans']
if var_del_vlan in vlan_list.keys():
pynet_sw1 = pyeapi.connect_to("pynet-sw1")
pynet_sw1.config(delete_vlan)
print "Vlan removed"
else:
print "Vlan id does not exist"
if var_vlan_name or var_vlan_id != None:
vlan_name = 'name %s' % results.vlan_name
vlan_id = 'vlan %s' % results.vlan_id
pynet_sw1 = pyeapi.connect_to("pynet-sw1")
vlan_command = pynet_sw1.enable("show vlan")
vlan_dict = vlan_command[0]
vlans = vlan_dict['result']
vlan_list = vlans['vlans']
cmds=[vlan_id, vlan_name]
if var_vlan_id in vlan_list.keys():
print "Vlan id exists"
else:
pynet_sw1 = pyeapi.connect_to("pynet-sw1")
pynet_sw1.config(cmds)
|
from setuptools import setup, find_packages
# Use the README verbatim as the long description so PyPI renders it as
# Markdown (see long_description_content_type below).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="pytiingo",
    version="0.0.1",
    description="Python SDK for Tiingo Financial Markets API",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Philip Kung",
    author_email="pkung67@utexas.edu",
    url="https://github.com/philipk19238/pytiingo",
    packages=find_packages(),
    # Runtime dependencies; deliberately unpinned for a library.
    install_requires=[
        'requests',
        'python-dotenv',
        'pandas',
        'pydantic'
    ]
)
|
#!/usr/bin/env python
# Copyright 2016 Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0.
from __future__ import print_function
from __future__ import division
import sys
import argparse
import math
from collections import defaultdict
# note, this was originally based
# Command-line interface.  The long description doubles as the script's
# user documentation.
parser = argparse.ArgumentParser(
    description="""
This script creates a language model that's intended to be used in modeling
phone sequences (either of sentences or of dictionary entries), although of
course it will work for any type of data. The easiest way
to describe it is as a a Kneser-Ney language model (unmodified, with addition)
with a fixed discounting constant equal to 1, except with no smoothing of the
bigrams (and hence no unigram state). This is (a) because we want to keep the
graph after context expansion small, (b) because languages tend to have
constraints on which phones can follow each other, and (c) in order to get valid
sequences of word-position-dependent phones so that lattice-align-words can
work. It also includes have a special entropy-based pruning technique that
backs off the statistics of pruned n-grams to lower-order states.
This script reads lines from its standard input, each
consisting of a sequence of integer symbol-ids (which should be > 0),
representing the phone sequences of a sentence or dictionary entry.
This script outputs a backoff language model in FST format""",
    epilog="See also utils/lang/make_phone_bigram_lang.sh",
)
parser.add_argument(
    "--phone-disambig-symbol",
    type=int,
    required=False,
    help="Integer corresponding to an otherwise-unused "
    "phone-level disambiguation symbol (e.g. #5). This is "
    "inserted at the beginning of the phone sequence and "
    "whenever we back off.",
)
parser.add_argument(
    "--ngram-order",
    type=int,
    default=4,
    choices=[2, 3, 4, 5, 6, 7],
    help="Order of n-gram to use (but see also --num-extra-states;"
    "the effective order after pruning may be less.",
)
parser.add_argument(
    "--num-extra-ngrams",
    type=int,
    default=20000,
    help="Target number of n-grams in addition to the n-grams in "
    "the bigram LM states which can't be pruned away. n-grams "
    "will be pruned to reach this target.",
)
parser.add_argument(
    "--no-backoff-ngram-order",
    type=int,
    default=2,
    choices=[1, 2, 3, 4, 5],
    help="This specifies the n-gram order at which (and below which) "
    "no backoff or pruning should be done. This is expected to normally "
    "be bigram, but for testing purposes you may want to set it to "
    "1.",
)
parser.add_argument(
    "--print-as-arpa",
    type=str,
    default="false",
    choices=["true", "false"],
    help="If true, print LM in ARPA format (default is to print "
    "as FST). You must also set --no-backoff-ngram-order=1 or "
    "this is not allowed.",
)
parser.add_argument(
    "--verbose", type=int, default=0, choices=[0, 1, 2, 3, 4, 5], help="Verbose level"
)

args = parser.parse_args()

if args.verbose >= 1:
    # Echo the full command line to stderr for reproducibility in logs.
    print(" ".join(sys.argv), file=sys.stderr)
class CountsForHistory(object):
    ## Stores the counts observed in one particular history-state; used
    ## inside class NgramCounts.  Behaves like a dict from word-id (int) to
    ## count, while also maintaining the running total of all counts.

    def __init__(self):
        # defaultdict(int) so that looking up an unseen word yields 0.
        self.word_to_count = defaultdict(int)
        self.total_count = 0

    def Words(self):
        ## Returns the list of word-ids that currently have an entry.
        return list(self.word_to_count.keys())

    def __str__(self):
        # e.g. returns ' total=12 3 -> 4 4 -> 6 -1 -> 2'
        pieces = [
            "{0} -> {1}".format(word, count)
            for word, count in self.word_to_count.items()
        ]
        return " total={0} {1}".format(str(self.total_count), " ".join(pieces))

    ## Adds a certain count (expected to be integer, but might be negative).
    ## If the resulting count for this word is zero, the dict entry is
    ## removed entirely.  Counts must never go negative.
    ## [note, though, that in some circumstances we 'add back' zero counts
    ## where the presence of n-grams would be structurally required by the
    ## arpa, specifically if a higher-order history state has a nonzero
    ## count, we need to structurally have the count there in the states it
    ## backs off to.
    def AddCount(self, predicted_word, count):
        self.total_count += count
        assert self.total_count >= 0
        updated_count = self.word_to_count[predicted_word] + count
        if updated_count < 0:
            print(
                "predicted-word={0}, old-count={1}, count={2}".format(
                    predicted_word, updated_count - count, count
                )
            )
        assert updated_count >= 0
        if updated_count == 0:
            del self.word_to_count[predicted_word]
        else:
            self.word_to_count[predicted_word] = updated_count
class NgramCounts(object):
## A note on data-structure. Firstly, all words are represented as
## integers. We store n-gram counts as an array, indexed by (history-length
## == n-gram order minus one) (note: python calls arrays "lists") of dicts
## from histories to counts, where histories are arrays of integers and
## "counts" are dicts from integer to float. For instance, when
## accumulating the 4-gram count for the '8' in the sequence '5 6 7 8', we'd
## do as follows: self.counts[3][[5,6,7]][8] += 1.0 where the [3] indexes an
## array, the [[5,6,7]] indexes a dict, and the [8] indexes a dict.
    def __init__(self, ngram_order):
        """Initialize empty count tables for history lengths 0..ngram_order-1.

        :param ngram_order: highest n-gram order to accumulate; must be >= 2
            (this LM has no unigram-only mode).
        """
        assert ngram_order >= 2
        # Integerized counts will never contain negative numbers, so
        # inside this program, we use -3 and -2 for the BOS and EOS symbols
        # respectively.
        # Note: it's actually important that the bos-symbol is the most negative;
        # it helps ensure that we print the state with left-context <s> first
        # when we print the FST, and this means that the start-state will have
        # the correct value.
        self.bos_symbol = -3
        self.eos_symbol = -2
        # backoff_symbol is kind of a pseudo-word, it's used in keeping track of
        # the backoff counts in each state.
        self.backoff_symbol = -1
        self.total_num_words = 0  # count includes EOS but not BOS.
        # self.counts[h] maps a history tuple of length h to a CountsForHistory.
        self.counts = []
        for n in range(ngram_order):
            self.counts.append(defaultdict(lambda: CountsForHistory()))
    # adds a raw count (called while processing input data).
    # Suppose we see the sequence '6 7 8 9' and ngram_order=4, 'history'
    # would be (6,7,8) and 'predicted_word' would be 9; 'count' would be
    # 1.
    def AddCount(self, history, predicted_word, count):
        # 'history' is a tuple of ints; its length selects the order bucket.
        self.counts[len(history)][history].AddCount(predicted_word, count)
# 'line' is a string containing a sequence of integer word-ids.
# This function adds the un-smoothed counts from this line of text.
def AddRawCountsFromLine(self, line):
try:
words = (
[self.bos_symbol] + [int(x) for x in line.split()] + [self.eos_symbol]
)
except:
sys.exit(
"make_phone_lm.py: bad input line {0} (expected a sequence "
"of integers)".format(line)
)
for n in range(1, len(words)):
predicted_word = words[n]
history_start = max(0, n + 1 - args.ngram_order)
history = tuple(words[history_start:n])
self.AddCount(history, predicted_word, 1)
self.total_num_words += 1
def AddRawCountsFromStandardInput(self):
lines_processed = 0
while True:
line = sys.stdin.readline()
if line == "":
break
self.AddRawCountsFromLine(line)
lines_processed += 1
if lines_processed == 0 or args.verbose > 0:
print(
"make_phone_lm.py: processed {0} lines of input".format(
lines_processed
),
file=sys.stderr,
)
    # This backs off the counts by subtracting 1 and assigning the subtracted
    # count to the backoff state. It's like a special case of Kneser-Ney with D
    # = 1. The optimal D would likely be something like 0.9, but we plan to
    # later do entropy-pruning, and the remaining small counts of 0.1 would
    # essentially all get pruned away anyway, so we don't lose much by doing it
    # like this.
    def ApplyBackoff(self):
        """Discount each n-gram count by 1 (for orders above
        args.no_backoff_ngram_order) and move the discounted mass to this
        state's backoff symbol and to the next-lower-order state."""
        # note: in the normal case where args.no_backoff_ngram_order == 2 we
        # don't do backoff for history-length = 1 (i.e. for bigrams)... this is
        # a kind of special LM where we're not going to back off to unigram,
        # there will be no unigram.
        if args.verbose >= 1:
            initial_num_ngrams = self.GetNumNgrams()
        # Process higher orders first so the mass cascades downward one
        # level at a time.
        for n in reversed(list(range(args.no_backoff_ngram_order, args.ngram_order))):
            this_order_counts = self.counts[n]
            for hist, counts_for_hist in this_order_counts.items():
                backoff_hist = hist[1:]
                backoff_counts_for_hist = self.counts[n - 1][backoff_hist]
                this_discount_total = 0
                # Words() returns a copy, so mutating counts inside the loop
                # is safe.
                for word in counts_for_hist.Words():
                    counts_for_hist.AddCount(word, -1)
                    # You can interpret the following line as incrementing the
                    # count-of-counts for the next-lower order. Note, however,
                    # that later when we remove n-grams, we'll also add their
                    # counts to the next-lower-order history state, so the
                    # resulting counts won't strictly speaking be
                    # counts-of-counts.
                    backoff_counts_for_hist.AddCount(word, 1)
                    this_discount_total += 1
                counts_for_hist.AddCount(self.backoff_symbol, this_discount_total)

        if args.verbose >= 1:
            # Note: because D == 1, we completely back off singletons.
            print(
                "make_phone_lm.py: ApplyBackoff() reduced the num-ngrams from "
                "{0} to {1}".format(initial_num_ngrams, self.GetNumNgrams()),
                file=sys.stderr,
            )
# This function prints out to stderr the n-gram counts stored in this
# object; it's used for debugging.
def Print(self, info_string):
print(info_string, file=sys.stderr)
# these are useful for debug.
total = 0.0
total_excluding_backoff = 0.0
for this_order_counts in self.counts:
for hist, counts_for_hist in this_order_counts.items():
print(str(hist) + str(counts_for_hist), file=sys.stderr)
total += counts_for_hist.total_count
total_excluding_backoff += counts_for_hist.total_count
if self.backoff_symbol in counts_for_hist.word_to_count:
total_excluding_backoff -= counts_for_hist.word_to_count[
self.backoff_symbol
]
print(
"total count = {0}, excluding backoff = {1}".format(
total, total_excluding_backoff
),
file=sys.stderr,
)
def GetHistToStateMap(self):
# This function, called from PrintAsFst, returns a map from
# history to integer FST-state.
hist_to_state = dict()
fst_state_counter = 0
for n in range(0, args.ngram_order):
for hist in self.counts[n].keys():
hist_to_state[hist] = fst_state_counter
fst_state_counter += 1
return hist_to_state
    # Returns the probability of word 'word' in history-state 'hist'.
    # If 'word' is self.backoff_symbol, returns the backoff prob
    # of this history-state.
    # Returns None if there is no such word in this history-state, or this
    # history-state does not exist.
    def GetProb(self, hist, word):
        """Return p(word | hist) including backed-off probability mass
        (recursing into shorter histories), or None when absent."""
        if len(hist) >= args.ngram_order or not hist in self.counts[len(hist)]:
            return None
        counts_for_hist = self.counts[len(hist)][hist]
        total_count = float(counts_for_hist.total_count)
        if not word in counts_for_hist.word_to_count:
            print(
                "make_phone_lm.py: no prob for {0} -> {1} "
                "[no such count]".format(hist, word),
                file=sys.stderr,
            )
            return None
        prob = float(counts_for_hist.word_to_count[word]) / total_count
        # For a real word in a state that has backoff mass, add the
        # probability reachable via the backoff state (computed recursively).
        if (
            len(hist) > 0
            and word != self.backoff_symbol
            and self.backoff_symbol in counts_for_hist.word_to_count
        ):
            prob_in_backoff = self.GetProb(hist[1:], word)
            backoff_prob = (
                float(counts_for_hist.word_to_count[self.backoff_symbol]) / total_count
            )
            # prob_in_backoff may be None when the backed-off n-gram is
            # missing; the multiply then raises and we bail out with context.
            try:
                prob += backoff_prob * prob_in_backoff
            except:
                sys.exit("problem, hist is {0}, word is {1}".format(hist, word))
        return prob
def PruneEmptyStates(self):
# Removes history-states that have no counts.
# It's possible in principle for history-states to have no counts and
# yet they cannot be pruned away because a higher-order version of the
# state exists with nonzero counts, so we have to keep track of this.
protected_histories = set()
states_removed_per_hist_len = [0] * args.ngram_order
for n in reversed(list(range(args.no_backoff_ngram_order, args.ngram_order))):
num_states_removed = 0
for hist, counts_for_hist in self.counts[n].items():
l = len(counts_for_hist.word_to_count)
assert l > 0 and self.backoff_symbol in counts_for_hist.word_to_count
if (
l == 1 and not hist in protected_histories
): # only the backoff symbol has a count.
del self.counts[n][hist]
num_states_removed += 1
else:
# if this state was not pruned away, then the state that
# it backs off to may not be pruned away either.
backoff_hist = hist[1:]
protected_histories.add(backoff_hist)
states_removed_per_hist_len[n] = num_states_removed
if args.verbose >= 1:
print(
"make_phone_lm.py: in PruneEmptyStates(), num states removed for "
"each history-length was: " + str(states_removed_per_hist_len),
file=sys.stderr,
)
    def EnsureStructurallyNeededNgramsExist(self):
        """Add zero counts for the lower-order n-grams that the ARPA/FST
        representation structurally requires."""
        # makes sure that if an n-gram like (6, 7, 8) -> 9 exists,
        # then counts exist for (7, 8) -> 9 and (8,) -> 9. It does so
        # by adding zero counts where such counts were absent.
        # [note: () -> 9 is guaranteed anyway by the backoff method, if
        # we have a unigram state].
        if args.verbose >= 1:
            num_ngrams_initial = self.GetNumNgrams()
        for n in reversed(list(range(args.no_backoff_ngram_order, args.ngram_order))):
            for hist, counts_for_hist in self.counts[n].items():
                # This loop ensures that if we have an n-gram like (6, 7, 8) -> 9,
                # then, say, (7, 8) -> 9 and (8) -> 9 exist.
                # (only lower-order tables self.counts[m], m < n, are modified,
                # so iterating self.counts[n] here is safe.)
                reduced_hist = hist
                for m in reversed(list(range(args.no_backoff_ngram_order, n))):
                    reduced_hist = reduced_hist[1:]  # shift an element off
                    # the history.
                    counts_for_backoff_hist = self.counts[m][reduced_hist]
                    # '+= 0' creates a zero-count entry via the defaultdict
                    # without changing any existing count.
                    for word in counts_for_hist.word_to_count.keys():
                        counts_for_backoff_hist.word_to_count[word] += 0
                # This loop ensures that if we have an n-gram like (6, 7, 8) -> 9,
                # then, say, (6, 7) -> 8 and (6) -> 7 exist. This will be needed
                # for FST representations of the ARPA LM.
                reduced_hist = hist
                for m in reversed(list(range(args.no_backoff_ngram_order, n))):
                    this_word = reduced_hist[-1]
                    reduced_hist = reduced_hist[:-1]  # pop an element off the
                    # history
                    counts_for_backoff_hist = self.counts[m][reduced_hist]
                    counts_for_backoff_hist.word_to_count[this_word] += 0
        if args.verbose >= 1:
            print(
                "make_phone_lm.py: in EnsureStructurallyNeededNgramsExist(), "
                "added {0} n-grams".format(self.GetNumNgrams() - num_ngrams_initial),
                file=sys.stderr,
            )
    # This function prints the estimated language model as an FST.
    def PrintAsFst(self, word_disambig_symbol):
        """Print the LM to stdout in OpenFst text format; backoff arcs carry
        'word_disambig_symbol' as their label (with zero output)."""
        # n is the history-length (== order + 1). We iterate over the
        # history-length in the order 1, 0, 2, 3, and then iterate over the
        # histories of each order in sorted order. Putting order 1 first
        # and sorting on the histories
        # ensures that the bigram state with <s> as the left context comes first.
        # (note: self.bos_symbol is the most negative symbol)

        # History will map from history (as a tuple) to integer FST-state.
        hist_to_state = self.GetHistToStateMap()

        for n in [1, 0] + list(range(2, args.ngram_order)):
            this_order_counts = self.counts[n]
            # For order 1, make sure the keys are sorted.
            keys = (
                this_order_counts.keys() if n != 1 else sorted(this_order_counts.keys())
            )
            for hist in keys:
                word_to_count = this_order_counts[hist].word_to_count
                this_fst_state = hist_to_state[hist]

                for word in word_to_count.keys():
                    # work out this_cost. Costs in OpenFst are negative logs.
                    this_cost = -math.log(self.GetProb(hist, word))

                    if word > 0:  # a real word.
                        next_hist = hist + (word,)  # appending tuples
                        # Fall back to progressively shorter histories until
                        # one that exists as a state is found.
                        while not next_hist in hist_to_state:
                            next_hist = next_hist[1:]
                        next_fst_state = hist_to_state[next_hist]
                        print(this_fst_state, next_fst_state, word, word, this_cost)
                    elif word == self.eos_symbol:
                        # print final-prob for this state.
                        print(this_fst_state, this_cost)
                    else:
                        assert word == self.backoff_symbol
                        backoff_fst_state = hist_to_state[hist[1 : len(hist)]]
                        print(
                            this_fst_state,
                            backoff_fst_state,
                            word_disambig_symbol,
                            0,
                            this_cost,
                        )
    # This function returns a set of n-grams that cannot currently be pruned
    # away, either because a higher-order form of the same n-gram already exists,
    # or because the n-gram leads to an n-gram state that exists.
    # [Note: as we prune, we remove any states that can be removed; see that
    # PruneToIntermediateTarget() calls PruneEmptyStates().
    def GetProtectedNgrams(self):
        """Return the set of n-grams (history tuple + word) that must not be
        pruned at this point."""
        ans = set()
        for n in range(args.no_backoff_ngram_order + 1, args.ngram_order):
            for hist, counts_for_hist in self.counts[n].items():
                # If we have an n-gram (6, 7, 8) -> 9, the following loop will
                # add the backed-off n-grams (7, 8) -> 9 and (8) -> 9 to
                # 'protected-ngrams'.
                reduced_hist = hist
                for m in reversed(list(range(args.no_backoff_ngram_order, n))):
                    reduced_hist = reduced_hist[1:]  # shift an element off
                    # the history.
                    for word in counts_for_hist.word_to_count.keys():
                        if word != self.backoff_symbol:
                            ans.add(reduced_hist + (word,))
                # The following statement ensures that if we are in a
                # history-state (6, 7, 8), then n-grams (6, 7, 8) and (6, 7) are
                # protected. This assures that the FST states are accessible.
                reduced_hist = hist
                for m in reversed(list(range(args.no_backoff_ngram_order, n))):
                    ans.add(reduced_hist)
                    reduced_hist = reduced_hist[:-1]  # pop an element off the
                    # history
        return ans
def PruneNgram(self, hist, word):
counts_for_hist = self.counts[len(hist)][hist]
assert word != self.backoff_symbol and word in counts_for_hist.word_to_count
count = counts_for_hist.word_to_count[word]
del counts_for_hist.word_to_count[word]
counts_for_hist.word_to_count[self.backoff_symbol] += count
# the next call adds the count to the symbol 'word' in the backoff
# history-state, and also updates its 'total_count'.
self.counts[len(hist) - 1][hist[1:]].AddCount(word, count)
    # The function PruningLogprobChange is the same as the same-named
    # function in float-counts-prune.cc in pocolm. Note, it doesn't access
    # any class members.
    # This function computes the log-likelihood change (<= 0) from backing off
    # a particular symbol to the lower-order state.
    # The value it returns can be interpreted as a lower bound the actual log-likelihood
    # change. By "the actual log-likelihood change" we mean of data generated by
    # the model itself before making the change, then modeled with the changed model
    # [and comparing the log-like with the log-like before changing the model]. That is,
    # it's a K-L divergence, but with the caveat that we don't normalize by the
    # overall count of the data, so it's a K-L divergence multiplied by the training-data
    # count.
    # 'count' is the count of the word (call it 'a') in this state. It's an integer.
    # 'discount' is the discount-count in this state (represented as the count
    # for the symbol self.backoff_symbol). It's an integer.
    # [note: we don't care about the total-count in this state, it cancels out.]
    # 'backoff_count' is the count of word 'a' in the lower-order state.
    # [actually it is the augmented count, treating any
    # extra probability from even-lower-order states as
    # if it were a count]. It's a float.
    # 'backoff_total' is the total count in the lower-order state. It's a float.
    def PruningLogprobChange(self, count, discount, backoff_count, backoff_total):
        """Return a non-positive lower bound on the training-data
        log-likelihood change from pruning this n-gram (see the block
        comment above for the precise interpretation)."""
        if count == 0:
            return 0.0
        assert (
            discount > 0
            and backoff_total >= backoff_count
            and backoff_total >= 0.99 * discount
        )

        # augmented_count is like 'count', but with the extra count for symbol
        # 'a' due to backoff included.
        augmented_count = count + discount * backoff_count / backoff_total

        # We imagine a phantom symbol 'b' that represents all symbols other than
        # 'a' appearing in this history-state that are accessed via backoff. We
        # treat these as being distinct symbols from the same symbol if accessed
        # not-via-backoff. (Treating same symbols as distinct gives an upper bound
        # on the divergence). We also treat them as distinct from the same symbols
        # that are being accessed via backoff from other states. b_count is the
        # observed count of symbol 'b' in this state (the backed-off count is
        # zero). b_count is also the count of symbol 'b' in the backoff state.
        # Note: b_count will not be negative because backoff_total >= backoff_count.
        b_count = discount * ((backoff_total - backoff_count) / backoff_total)
        assert b_count >= -0.001 * backoff_total

        # We imagine a phantom symbol 'c' that represents all symbols other than
        # 'a' and 'b' appearing in the backoff state, which got there from
        # backing off other states (other than 'this' state). Again, we imagine
        # the symbols are distinct even though they may not be (i.e. that c and
        # b represent disjoint sets of symbol, even though they might not really
        # be disjoint), and this gives us an upper bound on the divergence.
        c_count = backoff_total - backoff_count - b_count
        assert c_count >= -0.001 * backoff_total

        # a_other is the count of 'a' in the backoff state that comes from
        # 'other sources', i.e. it was backed off from history-states other than
        # the current history state.
        a_other_count = backoff_count - discount * backoff_count / backoff_total
        assert a_other_count >= -0.001 * backoff_count

        # the following sub-expressions are the 'new' versions of certain
        # quantities after we assign the total count 'count' to backoff. it
        # increases the backoff count in 'this' state, and also the total count
        # in the backoff state, and the count of symbol 'a' in the backoff
        # state.
        new_backoff_count = backoff_count + count  # new count of symbol 'a' in
        # backoff state
        new_backoff_total = backoff_total + count  # new total count in
        # backoff state.
        new_discount = discount + count  # new discount-count in 'this' state.

        # all the loglike changes below are of the form
        # count-of-symbol * log(new prob / old prob)
        # which can be more conveniently written (by canceling the denominators),
        # count-of-symbol * log(new count / old count).

        # this_a_change is the log-like change of symbol 'a' coming from 'this'
        # state. bear in mind that
        # augmented_count = count + discount * backoff_count / backoff_total,
        # and the 'count' term is zero in the numerator part of the log expression,
        # because symbol 'a' is completely backed off in 'this' state.
        this_a_change = augmented_count * math.log(
            (new_discount * new_backoff_count / new_backoff_total) / augmented_count
        )

        # other_a_change is the log-like change of symbol 'a' coming from all
        # other states than 'this'. For speed reasons we don't examine the
        # direct (non-backoff) counts of symbol 'a' in all other states than
        # 'this' that back off to the backoff state-- it would be slower.
        # Instead we just treat the direct part of the prob for symbol 'a' as a
        # distinct symbol when it comes from those other states... as usual,
        # doing so gives us an upper bound on the divergence.
        other_a_change = a_other_count * math.log(
            (new_backoff_count / new_backoff_total) / (backoff_count / backoff_total)
        )

        # b_change is the log-like change of phantom symbol 'b' coming from
        # 'this' state (and note: it only comes from this state, that's how we
        # defined it).
        # note: the expression below could be more directly written as a
        # ratio of pseudo-counts as follows, by converting the backoff probabilities
        # into pseudo-counts in 'this' state:
        # b_count * logf((new_discount * b_count / new_backoff_total) /
        # (discount * b_count / backoff_total),
        # but we cancel b_count to give us the expression below.
        b_change = b_count * math.log(
            (new_discount / new_backoff_total) / (discount / backoff_total)
        )

        # c_change is the log-like change of phantom symbol 'c' coming from
        # all other states that back off to the backoff sate (and all prob. mass of
        # 'c' comes from those other states). The expression below could be more
        # directly written as a ratio of counts, as c_count * logf((c_count /
        # new_backoff_total) / (c_count / backoff_total)), but we simplified it to
        # the expression below.
        c_change = c_count * math.log(backoff_total / new_backoff_total)

        ans = this_a_change + other_a_change + b_change + c_change
        # the answer should not be positive.
        assert ans <= 0.0001 * (count + discount + backoff_count + backoff_total)
        if args.verbose >= 4:
            print(
                "pruning-logprob-change for {0},{1},{2},{3} is {4}".format(
                    count, discount, backoff_count, backoff_total, ans
                ),
                file=sys.stderr,
            )
        return ans
def GetLikeChangeFromPruningNgram(self, hist, word):
    """Return the log-likelihood change (<= 0) from pruning the n-gram
    'hist -> word'.

    Looks up the direct count, the discount (backoff) count of 'hist',
    and the total count of the backoff history-state, then delegates the
    actual computation to PruningLogprobChange.

    :param hist: tuple of symbol-ids forming the history state
    :param word: the predicted symbol; must not be the backoff symbol and
                 must have a direct count in 'hist'
    """
    counts_for_hist = self.counts[len(hist)][hist]
    counts_for_backoff_hist = self.counts[len(hist) - 1][hist[1:]]
    assert word != self.backoff_symbol and word in counts_for_hist.word_to_count
    count = counts_for_hist.word_to_count[word]
    discount = counts_for_hist.word_to_count[self.backoff_symbol]
    backoff_total = counts_for_backoff_hist.total_count
    # backoff_count is a pseudo-count: it's like the count of 'word' in the
    # backoff history-state, but adding something to account for further
    # levels of backoff.
    try:
        backoff_count = self.GetProb(hist[1:], word) * backoff_total
    except Exception:
        # was a bare 'except:', which would also have swallowed
        # KeyboardInterrupt/SystemExit; catch only real errors.
        print(
            "problem getting backoff count: hist = {0}, word = {1}".format(
                hist, word
            ),
            file=sys.stderr,
        )
        sys.exit(1)
    return self.PruningLogprobChange(
        float(count), float(discount), backoff_count, float(backoff_total)
    )
# note: returns loglike change per word.
def PruneToIntermediateTarget(self, num_extra_ngrams):
    """Prune the model so that only 'num_extra_ngrams' n-grams of order
    higher than args.no_backoff_ngram_order remain.

    Every non-protected n-gram of a prunable order is a candidate; the
    candidates are ranked by the likelihood damage pruning them causes,
    and the least-damaging ones are pruned first.

    :param num_extra_ngrams: target count; must be strictly less than the
        current count (asserted below).
    :return: total log-likelihood change per word (non-positive float).
    """
    protected_ngrams = self.GetProtectedNgrams()
    initial_num_extra_ngrams = self.GetNumExtraNgrams()
    num_ngrams_to_prune = initial_num_extra_ngrams - num_extra_ngrams
    assert num_ngrams_to_prune > 0
    # per-order diagnostics, indexed by history length (= order - 1).
    num_candidates_per_order = [0] * args.ngram_order
    num_pruned_per_order = [0] * args.ngram_order
    # like_change_and_ngrams this will be a list of tuples consisting
    # of the likelihood change as a float and then the words of the n-gram
    # that we're considering pruning,
    # e.g. (-0.164, 7, 8, 9)
    # meaning that pruning the n-gram (7, 8) -> 9 leads to
    # a likelihood change of -0.164. We'll later sort this list
    # so we can prune the n-grams that made the least-negative
    # likelihood change.
    like_change_and_ngrams = []
    for n in range(args.no_backoff_ngram_order, args.ngram_order):
        for hist, counts_for_hist in self.counts[n].items():
            for word, count in counts_for_hist.word_to_count.items():
                if word != self.backoff_symbol:
                    if not hist + (word,) in protected_ngrams:
                        like_change = self.GetLikeChangeFromPruningNgram(hist, word)
                        like_change_and_ngrams.append(
                            (like_change,) + hist + (word,)
                        )
                        num_candidates_per_order[len(hist)] += 1
    # descending sort: least-negative (cheapest to prune) first.
    like_change_and_ngrams.sort(reverse=True)
    if num_ngrams_to_prune > len(like_change_and_ngrams):
        print(
            "make_phone_lm.py: aimed to prune {0} n-grams but could only "
            "prune {1}".format(num_ngrams_to_prune, len(like_change_and_ngrams)),
            file=sys.stderr,
        )
        num_ngrams_to_prune = len(like_change_and_ngrams)
    total_loglike_change = 0.0
    for i in range(num_ngrams_to_prune):
        total_loglike_change += like_change_and_ngrams[i][0]
        hist = like_change_and_ngrams[i][1:-1]  # all but 1st and last elements
        word = like_change_and_ngrams[i][-1]  # last element
        num_pruned_per_order[len(hist)] += 1
        self.PruneNgram(hist, word)
    like_change_per_word = total_loglike_change / self.total_num_words
    if args.verbose >= 1:
        # NOTE(review): if num_ngrams_to_prune was clamped to 0 above, the
        # '>= 0' guard still indexes like_change_and_ngrams[-1]; this looks
        # like it was meant to be '> 0' -- confirm.
        effective_threshold = (
            like_change_and_ngrams[num_ngrams_to_prune - 1][0]
            if num_ngrams_to_prune >= 0
            else 0.0
        )
        print(
            "Pruned from {0} ngrams to {1}, with threshold {2}. Candidates per order were {3}, "
            "num-ngrams pruned per order were {4}. Like-change per word was {5}".format(
                initial_num_extra_ngrams,
                initial_num_extra_ngrams - num_ngrams_to_prune,
                "%.4f" % effective_threshold,
                num_candidates_per_order,
                num_pruned_per_order,
                like_change_per_word,
            ),
            file=sys.stderr,
        )
    if args.verbose >= 3:
        # dump the full ranked candidate list, marking the pruning cutoff.
        print(
            "Pruning: like_change_and_ngrams is:\n"
            + "\n".join(
                [str(x) for x in like_change_and_ngrams[:num_ngrams_to_prune]]
            )
            + "\n-------- stop pruning here: ----------\n"
            + "\n".join(
                [str(x) for x in like_change_and_ngrams[num_ngrams_to_prune:]]
            ),
            file=sys.stderr,
        )
        self.Print(
            "Counts after pruning to num-extra-ngrams={0}".format(
                initial_num_extra_ngrams - num_ngrams_to_prune
            )
        )
    # pruning may have emptied some history states; drop them.
    self.PruneEmptyStates()
    if args.verbose >= 3:
        ngram_counts.Print(
            "Counts after removing empty states [inside pruning algorithm]:"
        )
    return like_change_per_word
def PruneToFinalTarget(self, num_extra_ngrams):
    """Prune down to 'num_extra_ngrams' extra n-grams via a decreasing
    sequence of intermediate targets (geometric steps of ~1.3, finishing
    with finer 1.2 and 1.1 steps)."""
    # prunes to a specified num_extra_ngrams. The 'extra_ngrams' refers to
    # the count of n-grams of order higher than args.no_backoff_ngram_order.
    # We construct a sequence of targets that gradually approaches
    # this value. Doing it iteratively like this is a good way
    # to deal with the fact that sometimes we can't prune a certain
    # n-gram before certain other n-grams are pruned (because
    # they lead to a state that must be kept, or an n-gram exists
    # that backs off to this n-gram).
    current_num_extra_ngrams = self.GetNumExtraNgrams()
    if num_extra_ngrams >= current_num_extra_ngrams:
        # already at or below the target: nothing to do.
        print(
            "make_phone_lm.py: not pruning since target num-extra-ngrams={0} is >= "
            "current num-extra-ngrams={1}".format(
                num_extra_ngrams, current_num_extra_ngrams
            ),
            file=sys.stderr,
        )
        return
    target_sequence = [num_extra_ngrams]
    # two final iterations where the targets differ by factors of 1.1,
    # preceded by two iterations where the targets differ by factors of 1.2.
    for this_factor in [1.1, 1.2]:
        for n in range(0, 2):
            if (
                int((target_sequence[-1] + 1) * this_factor)
                < current_num_extra_ngrams
            ):
                target_sequence.append(int((target_sequence[-1] + 1) * this_factor))
    # then change in factors of 1.3
    while True:
        this_factor = 1.3
        if int((target_sequence[-1] + 1) * this_factor) < current_num_extra_ngrams:
            target_sequence.append(int((target_sequence[-1] + 1) * this_factor))
        else:
            break
    target_sequence = list(set(target_sequence))  # only keep unique targets.
    # largest target first, so we prune in stages down to num_extra_ngrams.
    target_sequence.sort(reverse=True)
    print(
        "make_phone_lm.py: current num-extra-ngrams={0}, pruning with "
        "following sequence of targets: {1}".format(
            current_num_extra_ngrams, target_sequence
        ),
        file=sys.stderr,
    )
    total_like_change_per_word = 0.0
    for target in target_sequence:
        total_like_change_per_word += self.PruneToIntermediateTarget(target)
    if args.verbose >= 1:
        print(
            "make_phone_lm.py: K-L divergence from pruning (upper bound) is "
            "%.4f" % total_like_change_per_word,
            file=sys.stderr,
        )
# returns the number of n-grams on top of those that can't be pruned away
# because their order is <= args.no_backoff_ngram_order.
def GetNumExtraNgrams(self):
    """Count n-grams of order above args.no_backoff_ngram_order."""
    # hist_len + 1 is the actual order, so this sums the n-gram counts for
    # orders no_backoff_ngram_order + 1 .. ngram_order.
    return sum(
        self.GetNumNgrams(hist_len)
        for hist_len in range(args.no_backoff_ngram_order, args.ngram_order)
    )
def GetNumNgrams(self, hist_len=None):
    """Count n-grams whose history length is hist_len; if hist_len is None,
    count over all history lengths.  The backoff pseudo-symbol is excluded,
    since it does not produce its own n-gram line."""
    if hist_len is None:
        # hist_len + 1 is the actual order; sum over every order.
        return sum(self.GetNumNgrams(h) for h in range(args.ngram_order))
    total = 0
    for counts_for_hist in self.counts[hist_len].values():
        total += len(counts_for_hist.word_to_count)
        if self.backoff_symbol in counts_for_hist.word_to_count:
            # the backoff symbol doesn't count as an n-gram of its own.
            total -= 1
    return total
# this function, used in PrintAsArpa, converts an integer to
# a string by either printing it as a string, or for self.bos_symbol
# and self.eos_symbol, printing them as "<s>" and "</s>" respectively.
def IntToString(self, i):
    """Map symbol-id i to its ARPA string form."""
    special = {self.bos_symbol: "<s>", self.eos_symbol: "</s>"}
    try:
        return special[i]
    except KeyError:
        # the backoff symbol must never appear in a printed n-gram.
        assert i != self.backoff_symbol
        return str(i)
def PrintAsArpa(self):
    """Print the model to stdout in ARPA format.

    Requires a unigram LM state (args.no_backoff_ngram_order == 1);
    probabilities are printed as log10, tab-separated, per the ARPA spec.
    """
    # Prints out the FST in ARPA format.
    assert args.no_backoff_ngram_order == 1  # without unigrams we couldn't
    # print as ARPA format.
    print("\\data\\")
    for hist_len in range(args.ngram_order):
        # print the number of n-grams. Add 1 for the 1-gram
        # section because of <s>, we print -99 as the prob so we
        # have a place to put the backoff prob.
        print(
            "ngram {0}={1}".format(
                hist_len + 1,
                self.GetNumNgrams(hist_len) + (1 if hist_len == 0 else 0),
            )
        )
    print("")
    for hist_len in range(args.ngram_order):
        print("\\{0}-grams:".format(hist_len + 1))
        # print fake n-gram for <s>, for its backoff prob.
        if hist_len == 0:
            backoff_prob = self.GetProb((self.bos_symbol,), self.backoff_symbol)
            if backoff_prob != None:
                print("-99\t<s>\t{0}".format("%.5f" % math.log10(backoff_prob)))
        for hist in self.counts[hist_len].keys():
            for word in self.counts[hist_len][hist].word_to_count.keys():
                if word != self.backoff_symbol:
                    prob = self.GetProb(hist, word)
                    assert prob != None and prob > 0
                    # backoff prob of the state this n-gram leads to, if any;
                    # printed as the optional third ARPA column.
                    backoff_prob = self.GetProb(
                        (hist) + (word,), self.backoff_symbol
                    )
                    line = "{0}\t{1}".format(
                        "%.5f" % math.log10(prob),
                        " ".join(self.IntToString(x) for x in hist + (word,)),
                    )
                    if backoff_prob != None:
                        line += "\t{0}".format("%.5f" % math.log10(backoff_prob))
                    print(line)
        print("")
    print("\\end\\")
# Top-level driver: read raw n-gram counts from stdin, apply Kneser-Ney
# discounting, prune to the requested size, and print the result either as
# ARPA or as an FST.
ngram_counts = NgramCounts(args.ngram_order)
ngram_counts.AddRawCountsFromStandardInput()
if args.verbose >= 3:
    ngram_counts.Print("Raw counts:")
ngram_counts.ApplyBackoff()
if args.verbose >= 3:
    ngram_counts.Print("Counts after applying Kneser-Ney discounting:")
ngram_counts.EnsureStructurallyNeededNgramsExist()
if args.verbose >= 3:
    ngram_counts.Print("Counts after adding structurally-needed n-grams (1st time):")
ngram_counts.PruneEmptyStates()
if args.verbose >= 3:
    ngram_counts.Print("Counts after removing empty states:")
ngram_counts.PruneToFinalTarget(args.num_extra_ngrams)
# pruning can remove n-grams the model structurally relies on; re-add them
# before writing the output.
ngram_counts.EnsureStructurallyNeededNgramsExist()
if args.verbose >= 3:
    ngram_counts.Print("Counts after adding structurally-needed n-grams (2nd time):")
if args.print_as_arpa == "true":
    ngram_counts.PrintAsArpa()
else:
    if args.phone_disambig_symbol is None:
        # fixed: the original message had an unbalanced '(' -- it ended
        # "...you are writing as ARPA" with no closing parenthesis.
        sys.exit(
            "make_phone_lm.py: --phone-disambig-symbol must be provided (unless "
            "you are writing as ARPA)"
        )
    ngram_counts.PrintAsFst(args.phone_disambig_symbol)
## Below are some little test commands that can be used to look at the detailed stats
## for a kind of sanity check.
# test command:
# (echo 6 7 8 4; echo 7 8 9; echo 7 8; echo 7 4; echo 8 4 ) | utils/lang/make_phone_lm.py --phone-disambig-symbol=400 --verbose=3
# (echo 6 7 8 4; echo 7 8 9; echo 7 8; echo 7 4; echo 8 4 ) | utils/lang/make_phone_lm.py --phone-disambig-symbol=400 --verbose=3 --num-extra-ngrams=0
# (echo 6 7 8 4; echo 6 7 ) | utils/lang/make_phone_lm.py --print-as-arpa=true --no-backoff-ngram-order=1 --verbose=3
## The following shows how we created some data suitable to do comparisons with
## other language modeling toolkits. Note: we're running in a configuration
## where --no-backoff-ngram-order=1 (i.e. we have a unigram LM state) because
## it's the only way to get perplexity calculations and to write an ARPA file.
##
# cd egs/tedlium/s5_r2
# . ./path.sh
# mkdir -p lm_test
# ali-to-phones exp/tri3/final.mdl "ark:gunzip -c exp/tri3/ali.*.gz|" ark,t:- | awk '{$1 = ""; print}' > lm_test/phone_seqs
# wc lm_test/phone_seqs
# 92464 8409563 27953288 lm_test/phone_seqs
# head -n 20000 lm_test/phone_seqs > lm_test/train.txt
# tail -n 1000 lm_test/phone_seqs > lm_test/test.txt
## This shows make_phone_lm.py with the default number of extra-lm-states (20k)
## You have to have SRILM on your path to get perplexities [note: it should be on the
## path if you installed it and you sourced the tedlium s5b path.sh, as above.]
# utils/lang/make_phone_lm.py --print-as-arpa=true --no-backoff-ngram-order=1 --verbose=1 < lm_test/train.txt > lm_test/arpa_pr20k
# ngram -order 4 -unk -lm lm_test/arpa_pr20k -ppl lm_test/test.txt
# file lm_test/test.txt: 1000 sentences, 86489 words, 3 OOVs
# 0 zeroprobs, logprob= -80130.1 ppl=*8.23985* ppl1= 8.44325
# on training data: 0 zeroprobs, logprob= -1.6264e+06 ppl= 7.46947 ppl1= 7.63431
## This shows make_phone_lm.py without any pruning (make --num-extra-ngrams very large).
# utils/lang/make_phone_lm.py --print-as-arpa=true --num-extra-ngrams=1000000 --no-backoff-ngram-order=1 --verbose=1 < lm_test/train.txt > lm_test/arpa
# ngram -order 4 -unk -lm lm_test/arpa -ppl lm_test/test.txt
# file lm_test/test.txt: 1000 sentences, 86489 words, 3 OOVs
# 0 zeroprobs, logprob= -74976 ppl=*7.19459* ppl1= 7.36064
# on training data: 0 zeroprobs, logprob= -1.44198e+06 ppl= 5.94659 ppl1= 6.06279
## This is SRILM without pruning (c.f. the 7.19 above, it's slightly better).
# ngram-count -text lm_test/train.txt -order 4 -kndiscount2 -kndiscount3 -kndiscount4 -interpolate -lm lm_test/arpa_srilm
# ngram -order 4 -unk -lm lm_test/arpa_srilm -ppl lm_test/test.txt
# file lm_test/test.txt: 1000 sentences, 86489 words, 3 OOVs
# 0 zeroprobs, logprob= -74742.2 ppl= *7.15044* ppl1= 7.31494
## This is SRILM with a pruning beam tuned to get 20k n-grams above unigram
## (c.f. the 8.23 above, it's a lot worse).
# ngram-count -text lm_test/train.txt -order 4 -kndiscount2 -kndiscount3 -kndiscount4 -interpolate -prune 1.65e-05 -lm lm_test/arpa_srilm.pr1.65e-5
# the model has 20249 n-grams above unigram [c.f. our 20k]
# ngram -order 4 -unk -lm lm_test/arpa_srilm.pr1.65e-5 -ppl lm_test/test.txt
# file lm_test/test.txt: 1000 sentences, 86489 words, 3 OOVs
# 0 zeroprobs, logprob= -86803.7 ppl=*9.82202* ppl1= 10.0849
## This is pocolm..
## Note: we have to hold out some of the training data as dev to
## estimate the hyperparameters, but we'll fold it back in before
## making the final LM. [--fold-dev-into=train]
# mkdir -p lm_test/data/text
# head -n 1000 lm_test/train.txt > lm_test/data/text/dev.txt
# tail -n +1001 lm_test/train.txt > lm_test/data/text/train.txt
## give it a 'large' num-words so it picks them all.
# export PATH=$PATH:../../../tools/pocolm/scripts
# train_lm.py --num-word=100000 --fold-dev-into=train lm_test/data/text 4 lm_test/data/lm_unpruned
# get_data_prob.py lm_test/test.txt lm_test/data/lm_unpruned/100000_4.pocolm
## compute-probs: average log-prob per word was -1.95956 (perplexity = *7.0962*) over 87489 words.
## Note: we can compare this perplexity with 7.15 with SRILM and 7.19 with make_phone_lm.py.
# pruned_lm_dir=${lm_dir}/${num_word}_${order}_prune${threshold}.pocolm
# prune_lm_dir.py --target-num-ngrams=20100 lm_test/data/lm_unpruned/100000_4.pocolm lm_test/data/lm_unpruned/100000_4_pr20k.pocolm
# get_data_prob.py lm_test/test.txt lm_test/data/lm_unpruned/100000_4_pr20k.pocolm
## compute-probs: average log-prob per word was -2.0409 (perplexity = 7.69757) over 87489 words.
## note: the 7.69 can be compared with 9.82 from SRILM and 8.23 from pocolm.
## format_arpa_lm.py lm_test/data/lm_unpruned/100000_4_pr20k.pocolm | head
## .. it has 20488 n-grams above unigram. More than 20k but not enough to explain the difference
## .. in perplexity.
## OK... if I reran after modifying prune_lm_dir.py to comment out the line
## 'steps += 'EM EM'.split()' which adds the two EM stages per step, and got the
## perplexity again, I got the following:
## compute-probs: average log-prob per word was -2.09722 (perplexity = 8.14353) over 87489 words.
## .. so it turns out the E-M is actually important.
|
# Read an integer from the user and report whether it is even or odd.
value = int(input("Enter a number : "))
if value % 2 != 0:
    print(value, " is odd.")
else:
    print(value, " is even.")
import unittest
from katas.kyu_7.looking_for_a_benefactor import new_avg
class NewAverageTestCase(unittest.TestCase):
    """Checks new_avg against known donation lists and expected results."""

    def test_equals(self):
        donations = [
            129306, 37783, 169930, 177970, 66848, 68272, 120258, 10307,
            162807, 54503, 66465, 177701, 144296, 171044, 126332, 144744,
            177657, 61511, 128350, 52167, 103604, 110178, 115495, 97452,
            127971, 36683, 190742, 10960, 183186,
        ]
        self.assertEqual(new_avg(donations, 120389.413793), 387161)

    def test_equals_2(self):
        self.assertEqual(new_avg([14, 30, 5, 7, 9, 11, 16], 90), 628)

    def test_equals_3(self):
        self.assertEqual(new_avg([14, 30, 5, 7, 9, 11, 15], 92), 645)

    def test_exception(self):
        with self.assertRaises(ValueError):
            new_avg([0, 0], 0)
|
"""FPCore context canonicalizer and condenser."""
from ..fpbench import fpcast as ast
from . import interpreter
from . import evalctx
# Metadata properties whose expression values are evaluated at real
# (unbounded) precision rather than the core's working precision.
prop_uses_real_precision = {'pre', 'spec'}

# Operations whose result is boolean; a rounding-context annotation on them
# would be meaningless, so the canonicalizer leaves them un-annotated.
op_is_boolean = {
    ast.LT,
    ast.GT,
    ast.LEQ,
    ast.GEQ,
    ast.EQ,
    ast.NEQ,
    ast.Isfinite,
    ast.Isinf,
    ast.Isnan,
    ast.Isnormal,
    ast.Signbit,
    ast.And,
    ast.Or,
    ast.Not,
}
class Canonicalizer(interpreter.Evaluator):
    """FPCore canonicalizer.
    Push all annotations out to the leaves, so each operation / constants
    is annotated with its full rounding context.
    """

    @classmethod
    def _eval_var(cls, e, ctx):
        # Variables carry no rounding behavior of their own.
        return e

    @classmethod
    def _eval_val(cls, e, ctx):
        # Wrap the literal with the full accumulated context, if any.
        if len(ctx.props) == 0:
            return e
        else:
            return ast.Ctx(props=ctx.props, body=e)

    @classmethod
    def _eval_if(cls, e, ctx):
        return ast.If(
            cls.evaluate(e.cond, ctx),
            cls.evaluate(e.then_body, ctx),
            cls.evaluate(e.else_body, ctx),
        )

    @classmethod
    def _eval_let(cls, e, ctx):
        return ast.Let(
            [(name, cls.evaluate(expr, ctx)) for name, expr in e.let_bindings],
            cls.evaluate(e.body, ctx),
        )

    @classmethod
    def _eval_while(cls, e, ctx):
        return ast.While(
            cls.evaluate(e.cond, ctx),
            [(name, cls.evaluate(init_expr, ctx), cls.evaluate(update_expr, ctx))
             for name, init_expr, update_expr in e.while_bindings],
            cls.evaluate(e.body, ctx),
        )

    @classmethod
    def _eval_op(cls, e, ctx):
        # Boolean-valued operations are not annotated: a rounding context
        # is meaningless for them.
        children = (cls.evaluate(child, ctx) for child in e.children)
        if len(ctx.props) == 0 or type(e) in op_is_boolean:
            return type(e)(*children)
        else:
            return ast.Ctx(props=ctx.props, body=type(e)(*children))

    # translator interface
    @classmethod
    def translate(cls, core, ctx=None,
                  propagate={'precision', 'round', 'math-library'},
                  recurse={'pre', 'spec'}):
        """Canonicalize 'core', propagating the properties named in
        'propagate' down to the leaves and recursively canonicalizing the
        expression-valued metadata properties named in 'recurse'.
        A property may not appear in both sets (raises ValueError).
        (The set-valued defaults are read-only here, so sharing them across
        calls is safe.)
        """
        if ctx is None:
            ctx = cls.ctype(props={k:v for k, v in core.props.items() if k in propagate})
        else:
            ctx = ctx.let(props={k:v for k, v in core.props.items() if k in propagate})
        inputs = [(name, ctx.let(props=props).props) for name, props in core.inputs]
        e = cls.evaluate(core.e, ctx)
        props = {}
        for k, v in core.props.items():
            if k in recurse:
                if k in propagate:
                    raise ValueError('Canonicalizer: cannot propagate and recurse on the same property: {}'
                                     .format(str(k)))
                elif isinstance(v, ast.Expr):
                    # 'pre' and 'spec' are evaluated at real precision.
                    if k in prop_uses_real_precision:
                        rectx = ctx.let(props={'precision': ast.Var('real')})
                    else:
                        rectx = ctx
                    # (a stray debug print of rectx was removed here: it
                    # polluted stdout on every translate call)
                    props[k] = cls.evaluate(v, rectx)
                else:
                    props[k] = v
            elif k not in propagate:
                props[k] = v
        return ast.FPCore(inputs, e, props=props)
class Condenser(interpreter.Evaluator):
    """FPCore condenser.
    Remove explicit annotations that are known to be redundant.
    This does not result in a minimal set of annotations: there
    could be places where some annotations could be pulled up
    into a parent and merged together. The Minimizer handles
    those cases.
    """

    @classmethod
    def _eval_var(cls, e, ctx):
        return e

    @classmethod
    def _eval_val(cls, e, ctx):
        return e

    @classmethod
    def _eval_if(cls, e, ctx):
        return ast.If(
            cls.evaluate(e.cond, ctx),
            cls.evaluate(e.then_body, ctx),
            cls.evaluate(e.else_body, ctx),
        )

    @classmethod
    def _eval_let(cls, e, ctx):
        return ast.Let(
            [(name, cls.evaluate(expr, ctx)) for name, expr in e.let_bindings],
            cls.evaluate(e.body, ctx),
        )

    @classmethod
    def _eval_while(cls, e, ctx):
        return ast.While(
            cls.evaluate(e.cond, ctx),
            [(name, cls.evaluate(init_expr, ctx), cls.evaluate(update_expr, ctx))
             for name, init_expr, update_expr in e.while_bindings],
            cls.evaluate(e.body, ctx),
        )

    @classmethod
    def _eval_op(cls, e, ctx):
        return type(e)(*(cls.evaluate(child, ctx) for child in e.children))

    # all of the interesting work is here
    @classmethod
    def _eval_ctx(cls, e, ctx):
        """Keep only the properties of this Ctx node that actually differ
        from the inherited context; drop the Ctx wrapper entirely if none
        remain."""
        interesting_props = {k:v for k, v in e.props.items() if k not in ctx.props or ctx.props[k] != v}
        if interesting_props:
            return ast.Ctx(props=interesting_props, body=cls.evaluate(e.body, ctx.let(props=e.props)))
        else:
            return cls.evaluate(e.body, ctx.let(props=e.props))

    # translator interface
    @classmethod
    def translate(cls, core, ctx=None, recurse={'pre', 'spec'}):
        """Condense 'core', also condensing the expression-valued metadata
        properties named in 'recurse'.  Input annotations that merely repeat
        inherited properties are dropped as well."""
        if ctx is None:
            ctx = cls.ctype(props=core.props)
        else:
            ctx = ctx.let(props=core.props)
        inputs = [(name, {k:v for k, v in props.items() if k not in ctx.props or ctx.props[k] != v})
                  for name, props in core.inputs]
        e = cls.evaluate(core.e, ctx)
        props = {}
        for k, v in core.props.items():
            if k in recurse and isinstance(v, ast.Expr):
                # 'pre' and 'spec' are evaluated at real precision.
                if k in prop_uses_real_precision:
                    rectx = ctx.let(props={'precision': ast.Var('real')})
                else:
                    rectx = ctx
                props[k] = cls.evaluate(v, rectx)
            else:
                props[k] = v
        return ast.FPCore(inputs, e, props=props)
class Minimizer(interpreter.Evaluator):
    """FPCore minimizer.
    Pull all annotations up to the top level, so that each annotation
    appears in as few places and is inherited as much as possible.
    If an annotation appears on some, but not all, children of a node, it will be
    written explicitly for all of the children. Annotations could be
    minimized further by choosing the "most popular" annotation in these
    cases and carrying that one up to the parent.

    Evaluator methods here return a triple (expr, ctx, ctx_used):
    'ctx_used' is False for subtrees (bare variables, boolean operations)
    whose value is independent of the rounding context, so annotating them
    would be pointless.
    """

    @classmethod
    def _annotate(cls, e, ctx):
        """Given some subexpression e and its surrounding context, determine
        which properties on e are different from the surround context and
        thus need to be annotated specifically.
        """
        child, childctx, ctx_used = cls.evaluate(e, ctx)
        annotations = {}
        for prop in childctx.props:
            if prop not in ctx.props or childctx.props[prop] != ctx.props[prop]:
                annotations[prop] = childctx.props[prop]
        if len(annotations) == 0 or not ctx_used:
            return child, ctx_used
        else:
            return ast.Ctx(props=annotations, body=child), ctx_used

    @classmethod
    def _merge_contexts(cls, children, ctx):
        """Evaluate all 'children', hoist properties that every
        context-using child agrees on into the returned context, and wrap
        any child whose remaining properties still differ in an explicit
        Ctx annotation."""
        children_ctxs = [cls.evaluate(child, ctx) for child in children]
        any_child_used = False
        shared = {}
        shared_props = {}
        annotated = []
        for child, childctx, child_used in children_ctxs:
            any_child_used = any_child_used or child_used
            annotations = {}
            for prop in childctx.props:
                if prop not in ctx.props or childctx.props[prop] != ctx.props[prop]:
                    # property differs from surrounding context
                    # check if it's the same on all children
                    if prop in shared:
                        is_shared = shared[prop]
                    else:
                        is_shared = True
                        # only context-using siblings can veto sharing.
                        for other, otherctx, other_used in children_ctxs:
                            if (other is not child and
                                other_used and
                                (prop not in otherctx.props or childctx.props[prop] != otherctx.props[prop])):
                                is_shared = False
                        shared[prop] = is_shared
                        # remember the shared property and its value
                        shared_props[prop] = childctx.props[prop]
                    # property is not shared, so we need to annotate
                    if not is_shared:
                        annotations[prop] = childctx.props[prop]
            if len(annotations) == 0 or not child_used:
                annotated.append(child)
            else:
                annotated.append(ast.Ctx(props=annotations, body=child))
        return annotated, ctx.let(props=shared_props), any_child_used

    @classmethod
    def _eval_var(cls, e, ctx):
        return e, ctx, False

    @classmethod
    def _eval_val(cls, e, ctx):
        # do nothing here; the parent will annotate if necessary
        return e, ctx, True

    @classmethod
    def _eval_if(cls, e, ctx):
        (cond, let_body, else_body), ctx, ctx_used = cls._merge_contexts(
            [e.cond, e.then_body, e.else_body],
            ctx,
        )
        return ast.If(cond, let_body, else_body), ctx, ctx_used

    @classmethod
    def _eval_let(cls, e, ctx):
        names, child_exprs = zip(*e.let_bindings)
        (body, *exprs), ctx, ctx_used = cls._merge_contexts(
            [e.body, *child_exprs],
            ctx,
        )
        return ast.Let([*zip(names, exprs)], body), ctx, ctx_used

    @classmethod
    def _eval_while(cls, e, ctx):
        names, child_inits, child_updates = zip(*e.while_bindings)
        (cond, body, *exprs), ctx, ctx_used = cls._merge_contexts(
            [e.cond, e.body, *child_inits, *child_updates],
            ctx,
        )
        # child_inits and child_updates have equal length, so these two
        # slices split 'exprs' exactly in half (inits first, updates second).
        init_exprs = exprs[:len(child_inits)]
        update_exprs = exprs[len(child_updates):]
        return ast.While(cond, [*zip(names, init_exprs, update_exprs)], body), ctx, ctx_used

    @classmethod
    def _eval_op(cls, e, ctx):
        children_used = (cls._annotate(child, ctx) for child in e.children)
        children, used = zip(*children_used)
        return type(e)(*children), ctx, any(used) or type(e) not in op_is_boolean

    # translator interface
    @classmethod
    def translate(cls, core, ctx=None, recurse={'pre', 'spec'}):
        """Minimize 'core', also minimizing the expression-valued metadata
        properties named in 'recurse'; 'pre'/'spec' are handled at real
        precision."""
        if ctx is None:
            ctx = cls.ctype(props=core.props)
        else:
            ctx = ctx.let(props=core.props)
        e, ctx, ctx_used = cls.evaluate(core.e, ctx)
        inputs = []
        for name, props in core.inputs:
            # keep only input annotations that differ from the merged context.
            annotations = {}
            for k, v in props.items():
                if k not in ctx.props or v != ctx.props[k]:
                    annotations[k] = v
            inputs.append((name, annotations))
        reprops = {}
        for prop in recurse:
            if prop in ctx.props and isinstance(ctx.props[prop], ast.Expr):
                if prop in prop_uses_real_precision:
                    local_ctx = ctx.let(props={'precision': ast.Var('real')})
                else:
                    local_ctx = ctx
                re, rectx, rectx_used = cls.evaluate(ctx.props[prop], local_ctx)
                if rectx_used:
                    annotations = {}
                    for k, v in rectx.props.items():
                        # an explicit real precision on 'pre'/'spec' is
                        # already implied, so don't re-annotate it.
                        if prop in prop_uses_real_precision and k == 'precision':
                            if str(v) != 'real':
                                annotations[k] = v
                        elif k not in ctx.props or v != ctx.props[k]:
                            annotations[k] = v
                    if annotations:
                        reprops[prop] = ast.Ctx(props=annotations, body=re)
                    else:
                        reprops[prop] = re
                else:
                    reprops[prop] = re
        # technically, we should run a full merge on all of the inputs and the
        # recurse props, but eh
        return ast.FPCore(inputs, e, props=ctx.let(props=reprops).props)
|
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
def __bytes2ul(b):
    """Interpret the bytes-like object b as an unsigned little-endian integer."""
    return int.from_bytes(b, byteorder='little', signed=False)
def mmh2(bstr, seed=0xc70f6907, signed=True):
    """64-bit MurmurHash2 (MurmurHash64A) of a bytes-like object.

    :param bstr: the bytes to hash
    :param seed: hash seed (the default matches the server-side value)
    :param signed: if True, reinterpret the 64-bit result in two's
                   complement, matching a C++ int64_t
    :return: the 64-bit hash as a Python int

    Changes from the original: removed a stray trailing semicolon, and
    inlined the trivial module-private byte-conversion helper so this
    function is self-contained.
    """
    MASK = 2 ** 64 - 1  # keep every product in the unsigned 64-bit range
    m = 0xc6a4a7935bd1e995
    r = 47
    size = len(bstr)
    h = seed ^ (size * m & MASK)
    # main loop: consume the input 8 little-endian bytes at a time.
    end = size & ~0x7
    for pos in range(0, end, 8):
        k = int.from_bytes(bstr[pos:pos + 8], byteorder='little', signed=False)
        k = k * m & MASK
        k ^= k >> r
        k = k * m & MASK
        h ^= k
        h = h * m & MASK
    # fold in the trailing 1..7 bytes (mirrors the C switch fallthrough);
    # the extra multiply happens only when there is a tail.
    left = size & 0x7
    if left:
        for i in range(left):
            h ^= bstr[end + i] << (8 * i)
        h = h * m & MASK
    # final avalanche
    h ^= h >> r
    h = h * m & MASK
    h ^= h >> r
    if signed:
        # two's-complement reinterpretation of the sign bit
        h = h | (-(h & 0x8000000000000000))
    return h
if __name__ == '__main__':
    # Self-test against known-good vectors; these values must match the
    # server-side C++ MurmurHash2 implementation.
    assert mmh2(b'hello') == 2762169579135187400
    assert mmh2(b'World') == -295471233978816215
    assert mmh2(b'Hello World') == 2146989006636459346
    assert mmh2(b'Hello Wo') == -821961639117166431
|
# Normalise the phrase (lowercase, no surrounding whitespace), then report
# how many times the letter 'a' occurs and where it first and last appears.
frase = input('Insíra a frase desejada: ').strip().lower()
ocorrencias = frase.count('a')
primeira = frase.find('a')
ultima = frase.rfind('a')
print('A letra"A" aparece {} vezes nessa frase!'.format(ocorrencias))
print('A letra "A" aparece pela primeira vez na posição {} e pela última vez na posição {}'.format(primeira, ultima))
|
import hashlib
import hmac
import time
from hyperquant.api import Platform, Sorting, Direction
from hyperquant.clients import Endpoint, WSClient, Trade, ParamName, Error, \
ErrorCode, Channel, \
Info, WSConverter, RESTConverter, PlatformRESTClient, PrivatePlatformRESTClient
# https://docs.bitfinex.com/v1/docs
# https://docs.bitfinex.com/v2/docs
# REST
class BitfinexRESTConverterV1(RESTConverter):
    """Request/response converter for the Bitfinex REST API v1
    (https://docs.bitfinex.com/v1/docs)."""

    # Main params:
    base_url = "https://api.bitfinex.com/v{version}/"
    # v1 has no server-side sort parameter.
    IS_SORTING_ENABLED = False

    # Settings:

    # Converting info:
    # For converting to platform
    endpoint_lookup = {
        Endpoint.TRADE: "trades/{symbol}",
        Endpoint.TRADE_HISTORY: "trades/{symbol}",  # same, not implemented for this version
    }
    param_name_lookup = {
        ParamName.LIMIT: "limit_trades",
        ParamName.IS_USE_MAX_LIMIT: None,
        ParamName.SORTING: None,  # not supported
        ParamName.FROM_ITEM: "timestamp",
        ParamName.TO_ITEM: "timestamp",  # ?
        ParamName.FROM_TIME: "timestamp",
        ParamName.TO_TIME: None,  # ?
    }
    param_value_lookup = {
        # Sorting.ASCENDING: None,
        # Sorting.DESCENDING: None,
        Sorting.DEFAULT_SORTING: Sorting.DESCENDING,
    }
    max_limit_by_endpoint = {
        Endpoint.TRADE: 1000,
        Endpoint.TRADE_HISTORY: 1000,  # same, not implemented for this version
    }
    # For parsing
    param_lookup_by_class = {
        # NOTE(review): the platform "message" field is mapped onto the
        # Error's "code" attribute; the commented-out lines suggest this is
        # deliberate, but confirm against the RESTConverter base class.
        Error: {
            "message": "code",
            # "error": "code",
            # "message": "message",
        },
        Trade: {
            "tid": ParamName.ITEM_ID,
            "timestamp": ParamName.TIMESTAMP,
            "price": ParamName.PRICE,
            "amount": ParamName.AMOUNT,
            "type": ParamName.DIRECTION,
        },
    }
    error_code_by_platform_error_code = {
        # "": ErrorCode.UNAUTHORIZED,
        "Unknown symbol": ErrorCode.WRONG_SYMBOL,
        # "ERR_RATE_LIMIT": ErrorCode.RATE_LIMIT,
    }
    error_code_by_http_status = {
        429: ErrorCode.RATE_LIMIT,
    }

    # For converting time
    # is_source_in_milliseconds = True
    timestamp_platform_names = [ParamName.TIMESTAMP]

    def prepare_params(self, endpoint=None, params=None):
        """Drop SYMBOL from the query parameters: it is already substituted
        into the URL path by the base class."""
        resources, platform_params = super().prepare_params(endpoint, params)
        # (SYMBOL was used in URL path) (not necessary)
        if platform_params and ParamName.SYMBOL in platform_params:
            del platform_params[ParamName.SYMBOL]
        return resources, platform_params

    def parse(self, endpoint, data):
        """Upper-case the plain symbol list; defer everything else to the
        base class."""
        if data and endpoint == Endpoint.SYMBOLS:
            return [item.upper() for item in data]
        return super().parse(endpoint, data)

    def _parse_item(self, endpoint, item_data):
        """Post-process parsed items: map v1's "sell"/"buy" direction strings
        onto Direction constants (anything else becomes None)."""
        result = super()._parse_item(endpoint, item_data)
        # Convert Trade.direction
        if result and isinstance(result, Trade) and result.direction:
            # (Can be of "sell"|"buy|"")
            result.direction = Direction.SELL if result.direction == "sell" else \
                (Direction.BUY if result.direction == "buy" else None)
        return result
class BitfinexRESTConverterV2(RESTConverter):
    """Request/response converter for the Bitfinex REST API v2
    (https://docs.bitfinex.com/v2/docs)."""

    # Main params:
    base_url = "https://api.bitfinex.com/v{version}/"
    IS_SORTING_ENABLED = True

    # Settings:

    # Converting info:
    # For converting to platform
    endpoint_lookup = {
        Endpoint.TRADE: "trades/t{symbol}/hist",  # same, not implemented for this version
        Endpoint.TRADE_HISTORY: "trades/t{symbol}/hist",
    }
    param_name_lookup = {
        ParamName.LIMIT: "limit",
        ParamName.IS_USE_MAX_LIMIT: None,
        ParamName.SORTING: "sort",
        ParamName.FROM_ITEM: "start",
        ParamName.TO_ITEM: "end",
        ParamName.FROM_TIME: "start",
        ParamName.TO_TIME: "end",
    }
    param_value_lookup = {
        Sorting.ASCENDING: 1,
        Sorting.DESCENDING: 0,
        Sorting.DEFAULT_SORTING: Sorting.DESCENDING,
    }
    max_limit_by_endpoint = {
        Endpoint.TRADE: 1000,  # same, not implemented for this version
        Endpoint.TRADE_HISTORY: 1000,
    }
    # For parsing
    param_lookup_by_class = {
        # ["error",10020,"limit: invalid"]
        Error: ["", "code", "message"],
        # on trading pairs (ex. tBTCUSD) [ID, MTS, AMOUNT, PRICE]
        # [305430435,1539757383787,-0.086154,6760.7]
        # (on funding currencies (ex. fUSD) [ID, MTS, AMOUNT, RATE, PERIOD]) - not used now
        Trade: [ParamName.ITEM_ID, ParamName.TIMESTAMP, ParamName.AMOUNT, ParamName.PRICE],
    }
    error_code_by_platform_error_code = {
        # "": ErrorCode.UNAUTHORIZED,
        10020: ErrorCode.WRONG_LIMIT,
        11010: ErrorCode.RATE_LIMIT,
    }
    error_code_by_http_status = {}

    # For converting time
    # v2 timestamps (MTS fields) are in milliseconds.
    is_source_in_milliseconds = True
    timestamp_platform_names = ["start", "end"]

    def prepare_params(self, endpoint=None, params=None):
        """Drop SYMBOL from the query parameters: it is already substituted
        into the URL path (with its "t" prefix) by the endpoint template."""
        # # Symbol needs "t" prefix for trading pair
        # if ParamName.SYMBOL in params:
        #     params[ParamName.SYMBOL] = "t" + str(params[ParamName.SYMBOL])
        resources, platform_params = super().prepare_params(endpoint, params)
        # (SYMBOL was used in URL path) (not necessary)
        if platform_params and ParamName.SYMBOL in platform_params:
            del platform_params[ParamName.SYMBOL]
        return resources, platform_params

    def _process_param_value(self, name, value):
        """Allow a Trade instance as FROM_ITEM/TO_ITEM: v2 paginates trade
        history by timestamp, so the trade's timestamp is used as the
        cursor value."""
        # # Symbol needs "t" prefix for trading pair
        # if name == ParamName.SYMBOL and value:
        #     return "t" + value
        # elif
        if name == ParamName.FROM_ITEM or name == ParamName.TO_ITEM:
            if isinstance(value, Trade):
                return value.timestamp
        return super()._process_param_value(name, value)

    def _parse_item(self, endpoint, item_data):
        """Derive the trade direction from the sign of the amount
        (positive = buy) and stringify price/amount; the amount is emitted
        without its sign."""
        result = super()._parse_item(endpoint, item_data)
        if result and isinstance(result, Trade):
            # Determine direction
            result.direction = Direction.BUY if result.amount > 0 else Direction.SELL
            # Stringify and check sign
            result.price = str(result.price)
            result.amount = str(result.amount) if result.amount > 0 else str(-result.amount)
        return result

    def parse_error(self, error_data=None, response=None):
        """Recognize the {"error": "ERR_RATE_LIMIT"} body, which does not
        follow the usual ["error", code, message] triple format."""
        result = super().parse_error(error_data, response)
        if error_data and isinstance(error_data, dict) and "error" in error_data:
            if error_data["error"] == "ERR_RATE_LIMIT":
                result.error_code = ErrorCode.RATE_LIMIT
                result.message = ErrorCode.get_message_by_code(result.code) + result.message
        return result
class BitfinexRESTClient(PrivatePlatformRESTClient):
    """REST client for Bitfinex: uses API v2 by default, falling back to v1
    for endpoints that only exist there (e.g. the symbol list)."""

    platform_id = Platform.BITFINEX
    version = "2"  # Default version

    _converter_class_by_version = {
        "1": BitfinexRESTConverterV1,
        "2": BitfinexRESTConverterV2,
    }

    def get_symbols(self, version=None):
        """Fetch the symbol list; always uses v1 (the only version that
        supports it).  The `version` argument is accepted but ignored."""
        self.logger.info("Note: Bitfinex supports get_symbols only in v1.")
        return super().get_symbols(version="1")

    # # after_timestamp param can be added for v1, and after_timestamp, before_timestamp for v2
    # def fetch_trades(self, symbol, limit=None, **kwargs):
    #     return super().fetch_trades(symbol, limit, **kwargs)

    # v1: Same as fetch_trades(), but result can be only reduced, but not extended
    def fetch_trades_history(self, symbol, limit=None, from_item=None,
                             sorting=None, from_time=None, to_time=None, **kwargs):
        """Fetch historical trades.  Returns None under v1 when a from_item
        cursor is given, since v1 has no trade-history endpoint."""
        if from_item and self.version == "1":
            # todo check
            self.logger.warning("Bitfinex v1 API has no trades-history functionality.")
            return None
            # return self.fetch_trades(symbol, limit, **kwargs)
        return super().fetch_trades_history(symbol, limit, from_item, sorting=sorting,
                                            from_time=from_time, to_time=to_time, **kwargs)

    def _on_response(self, response, result):
        """Adapt the pacing of subsequent requests to rate-limit signals:
        honor an explicit Retry-After header, otherwise back off 60s on a
        parsed RATE_LIMIT error and 10s on any other platform error."""
        # super()._on_response(response)
        if not response.ok and "Retry-After" in response.headers:
            self.delay_before_next_request_sec = int(response.headers["Retry-After"])
        elif isinstance(result, Error):
            if result.code == ErrorCode.RATE_LIMIT:
                # Bitfinex API access is rate limited. The rate limit applies if an
                # IP address exceeds a certain number of requests per minute. The current
                # limit is between 10 and 45 to a specific REST API endpoint (ie. /ticker).
                # In case a client reaches the limit, we block the requesting IP address
                # for 10-60 seconds on that endpoint. The API will return the JSON response
                # {"error": "ERR_RATE_LIMIT"}. These DDoS defenses may change over time to
                # further improve reliability.
                self.delay_before_next_request_sec = 60
            else:
                self.delay_before_next_request_sec = 10
# WebSocket
class BitfinexWSConverterV2(WSConverter):
    """WebSocket data converter for Bitfinex API v2.

    Tracks subscribed channels by id so that list-shaped data frames, which
    carry only a numeric channel id, can be routed back to an endpoint and
    tagged with the channel's symbol.
    """
    # Main params:
    base_url = "wss://api.bitfinex.com/ws/{version}/"
    IS_SUBSCRIPTION_COMMAND_SUPPORTED = True

    # Converting info:
    # For converting to platform
    endpoint_lookup = {
        Endpoint.TRADE: "trades",
    }
    # For parsing: item class by event type
    item_class_by_endpoint = dict(**WSConverter.item_class_by_endpoint, **{
        "error": Error,
        "info": Info,
        "subscribed": Channel,
    })
    param_lookup_by_class = {
        Error: {
            "code": "code",
            "msg": "message",
        },
        Info: {
            "code": "code",
            "msg": "message",
        },
        Channel: {
            "chanId": "channel_id",
            "channel": "channel",
            "pair": ParamName.SYMBOL,
        },
        # Trades arrive as positional lists: [id, ms_timestamp, amount, price]
        Trade: [ParamName.ITEM_ID, ParamName.TIMESTAMP, ParamName.AMOUNT, ParamName.PRICE],
    }
    # https://docs.bitfinex.com/v2/docs/abbreviations-glossary
    # Relevant subscription error codes:
    # 10300: Subscription failed (generic), 10301: Already subscribed,
    # 10302: Unknown channel, 10400: Unsubscription failed (generic),
    # 10401: Not subscribed
    error_code_by_platform_error_code = {
        # 10000: ErrorCode.WRONG_EVENT,
        10001: ErrorCode.WRONG_SYMBOL,
        # 10305: ErrorCode.CHANNEL_LIMIT,
    }
    event_type_param = "event"
    # For converting time
    is_source_in_milliseconds = True

    def __init__(self, platform_id=None, version=None):
        # Channel objects registered on "subscribed" events, keyed by channel id.
        self.channel_by_id = {}
        super().__init__(platform_id, version)

    def _generate_subscription(self, endpoint, symbol=None, **params):
        """Return a (channel_name, symbol) pair describing one subscription."""
        channel = super()._generate_subscription(endpoint, symbol, **params)
        return (channel, symbol)

    def parse(self, endpoint, data):
        """Parse an incoming frame.

        List frames are either a snapshot
        ``[chan_id, [[id, ms, amount, price], ...]]`` or an update
        ``[chan_id, "te"|"tu", [id, ms, amount, price]]``; dict frames are
        handled by the superclass via the "event" field.
        """
        if isinstance(data, list):
            channel_id = data[0]
            channel = self.channel_by_id.get(channel_id)
            if channel:
                # Resolve the endpoint from the subscribed channel's name
                endpoint = None
                for k, v in self.endpoint_lookup.items():
                    if v == channel.channel:
                        endpoint = k
                if data[1] == "tu":
                    # Skip "tu" as the item has already been added as "te"
                    return None
                # data[1] is the list for snapshots; updates carry it in data[2]
                real_data = data[1] if isinstance(data[1], list) else [data[2]]
                result = super().parse(endpoint, real_data)
                # Tag every parsed item with the channel's symbol
                for item in result:
                    if hasattr(item, ParamName.SYMBOL):
                        item.symbol = channel.symbol
                return result
        return super().parse(endpoint, data)

    def _parse_item(self, endpoint, item_data):
        """Register Channel items for routing; finalize Trade items."""
        result = super()._parse_item(endpoint, item_data)
        if isinstance(result, Channel):
            self.channel_by_id[result.channel_id] = result
        elif result and isinstance(result, Trade):
            # FIX: was result.symbol.begins_with("."), but str has no
            # begins_with() method (AttributeError at runtime); startswith()
            # is the intended call. Symbols beginning with "." are skipped
            # (presumably synthetic feeds — TODO confirm).
            if result.symbol and result.symbol.startswith("."):
                return None
            if not result.item_id:
                # Build a synthetic id when the platform did not supply one
                result.item_id = "%s_%s_%s" % (result.timestamp, result.price, result.amount)
            # Determine direction from the sign of the amount
            result.direction = Direction.BUY if result.amount > 0 else Direction.SELL
            # Stringify and drop the sign
            result.price = str(result.price)
            result.amount = str(result.amount) if result.amount > 0 else str(-result.amount)
        return result
# (not necessary)
class BitfinexWSConverterV1(BitfinexWSConverterV2):
    """WebSocket data converter for Bitfinex API v1.

    Differs from v2 mainly in the positional order of Trade fields
    (price before amount) and in the Channel param mapping.
    """
    # Main params:
    base_url = "wss://api.bitfinex.com/ws/{version}/"
    # # Settings:
    #
    # # Converting info:
    # # For converting to platform
    # endpoint_lookup = {
    # Endpoint.TRADE: "trades",
    # }
    # For parsing
    param_lookup_by_class = {
        Error: {
            "code": "code",
            "msg": "message",
        },
        Info: {
            "code": "code",
            "msg": "message",
        },
        Channel: {
            "channel": "channel",
            "chanId": "channel_id",
            "pair": ParamName.SYMBOL,
        },
        # v1 trade frame example: [ 5, "te", "1234-BTCUSD", 1443659698, 236.42, 0.49064538 ]
        # Trade: ["", "", ParamName.ITEM_ID, ParamName.TIMESTAMP, ParamName.PRICE, ParamName.AMOUNT],
        Trade: [ParamName.ITEM_ID, ParamName.TIMESTAMP, ParamName.PRICE, ParamName.AMOUNT],
    }
    # # 10300 : Subscription failed (generic)
    # # 10301 : Already subscribed
    # # 10302 : Unknown channel
    # # 10400 : Unsubscription failed (generic)
    # # 10401 : Not subscribed
    # error_code_by_platform_error_code = {
    # # 10000: ErrorCode.WRONG_EVENT,
    # 10001: ErrorCode.WRONG_SYMBOL,
    # }
    #
    # # For converting time
    # # is_source_in_milliseconds = True
    # def parse_item(self, endpoint, item_data):
    # result = super().parse_item(endpoint, item_data)
    #
    # # Convert Channel.symbol "tXXXYYY" -> "XXXYYY"
    # if result and isinstance(result, Channel) and result.symbol:
    # if result.symbol[0] == "t":
    # result.symbol = result.symbol[1:]
    #
    # return result
class BitfinexWSClient(WSClient):
    """WebSocket client for Bitfinex.

    TODO: consider reconnection and resubscription.
    TODO: consider reconnect on connection, pong and other timeouts.
    """
    # Settings:
    platform_id = Platform.BITFINEX
    version = "2"  # Default version
    _converter_class_by_version = {
        "1": BitfinexWSConverterV1,
        "2": BitfinexWSConverterV2,
    }

    def _send_subscribe(self, subscriptions):
        """Send one "subscribe" event per (channel, symbol) pair.

        Bitfinex expects trading-pair symbols prefixed with "t"
        (e.g. "BTCUSD" -> "tBTCUSD").
        """
        for channel, symbol in subscriptions:
            trading_pair_symbol = "t" + symbol
            event_data = {
                "event": "subscribe",
                "channel": channel,
                "symbol": trading_pair_symbol}
            self._send(event_data)

    def _parse(self, endpoint, data):
        """Drop heartbeat frames ([chan_id, "hb"]) and delegate the rest."""
        if isinstance(data, list) and len(data) > 1 and data[1] == "hb":
            # Heartbeat. skip for now...
            return None
        return super()._parse(endpoint, data)

    def _send_auth(self):
        """Authenticate on the socket: HMAC-SHA384 over "AUTH" + nonce."""
        # Generate nonce
        auth_nonce = str(int(time.time() * 10000000))
        # Generate signature
        auth_payload = "AUTH" + auth_nonce
        auth_sig = hmac.new(self._api_secret.encode(), auth_payload.encode(),
                            hashlib.sha384).hexdigest()
        payload = {"event": "auth", "apiKey": self._api_key, "authSig": auth_sig,
                   "authPayload": auth_payload, "authNonce": auth_nonce}
        self._send(payload)

    # NOTE (translated from the original Russian comment): the commented-out
    # helper methods copied from btfxwss
    # (https://github.com/Crypto-toolbox/btfxwss) were dead code and have been
    # removed; re-import from that library if the functionality is needed.
    #
    # From https://github.com/bitfinexcom/bitfinex-api-node — how do "te" and
    # "tu" messages differ? A "te" packet is sent to the client immediately
    # after a trade has been matched & executed, followed by a "tu" message
    # once it has completed processing. Under high load "tu" may be noticeably
    # delayed, so only "te" should be used for a realtime feed.
|
import os
from bridgedata.models.gcbc_images import GCBCImages
import numpy as np
from bridgedata.utils.general_utils import AttrDict
current_dir = os.path.dirname(os.path.realpath(__file__))
from bridgedata_experiments.dataset_lmdb import TOTAL_NUM_TASKS_ALIASING, task_name_aliasing_dict, bridge_data_config
from widowx_envs.utils.datautils.lmdb_dataloader import LMDB_Dataset_Pandas

# Trainer configuration: goal-conditioned behavioral cloning (GCBC) on images.
configuration = AttrDict(
    main=AttrDict(
        model=GCBCImages,
        max_iterations=400000,
    ),
)

# Dataset config restricted to the 'toykitchen1' environment, with task-name
# aliasing applied so equivalent task labels share one id.
bridge_data_config_kitchen1_aliasing = AttrDict(
    name='toykitchen1',
    random_crop=[96, 128],  # crop size applied to images of image_size_beforecrop
    color_augmentation=0.1,
    image_size_beforecrop=[112, 144],
    # requires the DATA environment variable to point at the dataset root
    data_dir=os.environ['DATA'] + '/robonetv2/toykitchen_fixed_cam/',
    filtering_function=[lambda dframe: dframe[(dframe['environment'] == 'toykitchen1')]],
    aliasing_dict=task_name_aliasing_dict,
)

# Validation split: evaluate on the toykitchen1-only subset.
validation_conf_toykitchen1_aliasing = AttrDict(
    val0=AttrDict(
        dataclass=LMDB_Dataset_Pandas,
        dataconf=bridge_data_config_kitchen1_aliasing
    ),
)

# Training data: full bridge dataset, plus the validation config above.
data_config = AttrDict(
    main=AttrDict(
        dataclass=LMDB_Dataset_Pandas,
        dataconf=bridge_data_config,
        **validation_conf_toykitchen1_aliasing
    )
)

# Model hyperparameters; task_id_conditioning sets the one-hot task-id size.
model_config = AttrDict(
    main=AttrDict(
        action_dim=7,
        state_dim=7,
        resnet='resnet34',
        task_id_conditioning=TOTAL_NUM_TASKS_ALIASING,
        img_sz=[96, 128]
    )
)
# -*- coding: utf-8 -*-
'''
Created on 24.2.2009
@author: Jaakko Lintula <jaakko.lintula@iki.fi>
'''
import connection, events
import time, logging
Event = events.Event
LOG_FILENAME = "pyircfs.log"
#logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
# datefmt='%m-%d %H:%M:%S',)
CHANCHARS = '*#+!&'
def is_channel(target):
    """Return True if *target* names an IRC channel (first char in CHANCHARS).

    Robustness fix: the original indexed target[0] unconditionally, raising
    IndexError for an empty target; an empty string is now simply not a channel.
    """
    return bool(target) and target[0] in CHANCHARS
class ConnectionError(Exception):
    """Raised when connecting to or using the IRC connection fails.

    NOTE: intentionally keeps the module's historical name, which shadows the
    builtin ConnectionError inside this module.
    """

    def __init__(self, value):
        # Keep the raw value; __str__ reports its repr.
        self.value = value

    def __str__(self):
        return repr(self.value)
class Handler:
    """Central IRC protocol handler.

    Owns the connection, routes incoming server messages and privmsgs to
    "event store" objects (defined in the events module), and exposes
    methods for sending commands/messages and managing stores.
    """

    def _find_handler_classes(self, key):
        """searches for a given key in classes in the events.py module
        @return a list of (command, handler class) tuples"""
        # there has to be a better way :O
        classes = []  # list of (command, handler class) tuples
        for i in events.__dict__.keys():
            if hasattr(events.__dict__[i], key):
                for j in getattr(events.__dict__[i], key):
                    classes.append((j, events.__dict__[i]))
        return classes

    def _get_handlers(self, htype, command):
        """returns a list of eventstore objects for given message, instantiates
        ones from the handler class if needed and adds all other commands the same
        class is able to receive to the object list
        @param htype either 'reply' or 'command'
        @param command the command/reply the store should be interested in
        @return list of objects to send an event to"""
        if htype == 'reply':
            hlist = self.reply_handler_classes
            slist = self.reply_stores
        elif htype == 'command':
            hlist = self.command_handler_classes
            slist = self.command_stores
        else:
            return []
        classes = []
        objects = []
        wildcard_added = False
        # search if there are suitable objects already somewhere and use them if possible:
        for i in slist:
            if i[0] == command:
                objects.append(i[1])
            if i[0] == '*':
                objects.append(i[1])
                wildcard_added = True
        if (len(objects) > 1 and wildcard_added) or (objects and not wildcard_added):
            return objects
        for i in hlist:  # ok then, search for classes
            if i[0] == command:  # search for corresponding store objects
                if not issubclass(i[1], events.PrivmsgStore):
                    # Privmsg/channel stores are created only on
                    # PRIVMSG/NOTICE, not in _handle_server_message
                    classes.append(i[1])
        # now, classes contains only those classes that don't have instantiated objects
        for i in classes:
            # print "we are here with ", i
            obj = self._create_new_store(i)  # create a new store instance from the class
            # find out what other commands and replies the same class
            # supports and add them too:
            for j in self.reply_handler_classes:
                if j[1] == i:
                    self.reply_stores.append((j[0], obj))
            for j in self.command_handler_classes:
                if j[1] == i:
                    self.command_stores.append((j[0], obj))
            #slist.append((command, obj))
            objects.append(obj)
        return objects

    def _get_free_id(self):
        """returns a free unique id"""
        self._next_id += 1
        return self._next_id

    def _create_new_store(self, class_, *args, **kwargs):
        """creates a new eventstore object and assigns it an unique id.
        @param class_ the class to create an instance from
        @param *args, **kwargs are passed to the class
        @return the created object"""
        id = self._get_free_id()
        obj = class_(id=id, handler=self, *args, **kwargs)
        self.all_stores[id] = obj
        # notify interested parties (e.g. the UI) that a new store appeared
        [x(obj) for x in self.new_store_callbacks]
        return obj

    def __init__(self):
        """Initialize empty store registries and connection state."""
        self.command_handler_classes = self._find_handler_classes('command_handlers')
        self.reply_handler_classes = self._find_handler_classes('reply_handlers')
        self.command_stores = []
        self.reply_stores = []
        self.privmsg_stores = []
        self.all_stores = {}
        # channel names we were joined to when the connection dropped
        self.joined_when_disconnected = []
        self.new_store_callbacks = []
        self._next_id = 0
        self.connection = None
        # (status number, description) — see receive_status for the legend
        self.connection_status = (0, '')
        self.connection_status_timestamp = 0
        self.nicknames = []
        self.username = ""
        self.nickname = ""

    def connect(self, server, nicknames, username, realname, port=6667, password=""):
        """Tries to connect to the IRC server."""
        #self.nickname = nickname
        self.nicknames = nicknames
        self.username = username
        self.realname = realname
        self.server = server
        self.port = port
        #self.connection = connection.connect(self, server, port)
        self.connection = connection.Connection(server, port,
                                                self.receive_message,
                                                self.receive_status)
        self.connection.start()
        # busy-wait until the socket is open (status 1) or failed (103)
        while not self.connection_status[0] == 1:
            time.sleep(0.2)
            if self.connection_status[0] == 103:
                raise ConnectionError(self.connection_status[1])
        if password:
            self.send_command('PASS', password)
        self.send_command('NICK', nicknames[0])
        self.send_command('USER', '%s 0 * :%s' % (username, realname))

    def __str__(self):
        ret = "i'm a handler"
        if self.connection:
            ret += ", connected as %s" % self.connection
        return ret

    def remove_store(self, id):
        """removes all references to a store"""
        try:
            store = self.all_stores[id]
        except KeyError:
            raise ValueError("unknown store")
        self.all_stores.pop(id)
        while store in self.privmsg_stores:
            self.privmsg_stores.remove(store)
        for storelist in [self.reply_stores, self.command_stores]:
            to_remove = []
            for i in storelist:
                if i[1] == store:
                    to_remove.append(i)
            for i in to_remove:
                storelist.remove(i)
        # and finally tell the store about it
        store.remove()

    def get_store_id(self, store):
        """reverse lookup: return the id of a store object, or None."""
        for id in self.all_stores:
            if self.all_stores[id] == store:
                return id
        return None

    def close(self):
        """close the underlying connection."""
        self.connection.close()

    def receive_message(self, message):
        """handles messages coming from the connection and hands them to
        _handle_privmsg or _handle_server_message depending on message type"""
        logging.debug("receive_message: received %s" % message)
        tmp = message.split(' ')
        # parses the received message to prefix/cmd/params:
        if message[0] == ":":
            prefix = tmp[0]
            cmd = tmp[1]
            params = ' '.join(tmp[2:])
        else:
            prefix = ""
            cmd = tmp[0]
            params = ' '.join(tmp[1:])
        ev = Event(prefix=prefix, command=cmd, params=params)
        #print "RECV: prefix %s cmd %s params %s " % (prefix, cmd, params)
        if cmd == 'JOIN':
            # JOINs are a special case
            # - we need to create a privmsg store for them if one doesn't
            # exist
            self._get_privmsg_handlers(params[1:])
            # now a store is created for the channel if one didn't exist
            # already - we don't need the actual instance anywhere in here,
            # but now _handle_server_message has somewhere to send the JOIN too
        if cmd in ["PRIVMSG", "NOTICE"]:
            self._handle_privmsg(ev)
        else:
            self._handle_server_message(ev)

    def _handle_privmsg(self, event):
        """route a PRIVMSG/NOTICE event to the store(s) for its target."""
        logging.debug("_handle_privmsg: event %s" % event)
        #if event.params[0] in '*#+!&':
        if is_channel(event.params[0]):
            target = event.params.split()[0]
        else:
            try:
                # target is us; use the sender's nickname from the prefix
                target = event.prefix[1:event.prefix.index('!')]
            except ValueError:  # no nickname could be found
                logging.debug("hmm? couldn't extract nickname from event")
                return
        stores = self._get_privmsg_handlers(target)
        [store.add_event(event) for store in stores]

    def _get_privmsg_handlers(self, target):
        """return the privmsg/channel store(s) for *target*, creating a new
        ChannelStore or PrivmsgStore on first contact; wildcard ('*') reply
        stores are appended to the result."""
        logging.debug("ENTER _get_privmsg_handlers, target: %s" % target)
        s = [x for x in self.privmsg_stores if x.target.lower() == target.lower()]
        if not s:
            logging.debug("_get_privmsg_handlers: no existing store found")
            if is_channel(target[0]):
                s.append(self._create_new_store(events.ChannelStore, target=target, name="_"+target))
                replies = events.ChannelStore.reply_handlers
            else:
                s.append(self._create_new_store(events.PrivmsgStore, target=target, name="_"+target))
                replies = events.PrivmsgStore.reply_handlers
            self.privmsg_stores.append(s[-1])
            for r in replies:
                self.reply_stores.append((r, s[-1]))  # TODO ADD ID
        for i in self.reply_stores:
            if i[0] == '*':
                s.append(i[1])
        logging.debug("_get_privmsg_handlers: returning stores: %s" % [str(x) for x in s])
        return s

    def _handle_server_message(self, event):
        """route a non-privmsg server event to reply stores; any messages a
        store returns are sent straight back to the server (e.g. PONG)."""
        handlers = self._get_handlers('reply', event.command)
        for h in handlers:
            answer = h.add_event(event)
            if answer:
                [self.connection.send(msg) for msg in answer]

    def send_command(self, command, params):
        """send a raw IRC command via its command store.
        Only PASS/USER/NICK are allowed while still registering (status 1)."""
        if self.connection_status[0] not in (1, 10) or \
           (self.connection_status[0] == 1 and \
            command not in ['PASS', 'USER', 'NICK']):
            raise ConnectionError("not connected")
        command = command.upper()
        handlers = self._get_handlers('command', command)
        if not handlers:
            raise ValueError("unknown command")
        for h in handlers:
            to_send = h.generate_event(command, params)
            if to_send:
                for msg in to_send:
                    self.connection.send(msg)

    def send_message(self, target, message, type="PRIVMSG"):
        """send a PRIVMSG (or NOTICE) to a nick or channel via its store."""
        logging.debug("ENTER send_message: target %s message %s type %s" % (target, message, type))
        #if message.startswith('wait'):
        # time.sleep(10)
        if not self.connection_status[0] == 10:
            raise ConnectionError("not connected")
        store = self._get_privmsg_handlers(target)[0]
        logging.debug("send_message: store resolved as %s" % store)
        to_send = store.generate_event(type, message)
        logging.debug("send_message: to_send: %s" % to_send)
        if to_send:
            for msg in to_send:
                self.connection.send(msg)

    def send_notice(self, target, message):
        """convenience wrapper: send_message with type NOTICE."""
        self.send_message(target, message, type="NOTICE")

    def create_privmsg_store(self, target):
        """ensure a privmsg/channel store exists for *target*."""
        self._get_privmsg_handlers(target)

    def create_command_store(self, target):
        """ensure a command store exists for *target*; raises on unknown command."""
        if not self._get_handlers('command', target.upper()):
            raise ValueError('unknown command')

    def receive_status(self, statusno, statusdesc):
        """receives a tuple of status messages (number, description) from the connection object
        and (maybe) acts accordingly
        0: not connected
        1: connecting (socket opened)
        10: connection open and free to use
        100: disconnected by user request
        101: disconnected by server
        102: disconnected for some other reason (?)
        103: network error when connecting
        104: all nicknames in use
        105: bad server password
        messages can be sent only when status == 10
        (USER, PASS and NICK may be sent when status == 1)
        """
        #print "sain jotain statusta: %s %s" % (statusno, statusdesc)
        self.connection_status_timestamp = time.time()
        self.connection_status = (statusno, statusdesc)
        # when disconnected, save names of channels that were joined at the
        # time, and send an informational event to them
        if self.connection_status[0] in (100, 101, 102):
            for i in self.privmsg_stores:
                if isinstance(i, events.ChannelStore):
                    if i.joined:
                        self.joined_when_disconnected.append(i.target)
                        i.joined = False
            disconnect_event = Event(prefix="", command="", params=statusdesc,
                                     generated=True, informational=True)
            for i in self.all_stores:
                self.all_stores[i].add_event(disconnect_event)

    def reconnect(self):
        """if disconnected, reconnects to a server an rejoins channels
        """
        if self.connection_status[0] == 10:
            raise ValueError("already connected!")
        self.connect(self.server, self.nicknames, self.username, self.realname, self.port)
        for channel in self.joined_when_disconnected:
            self.send_command('JOIN', channel)
        self.joined_when_disconnected = []

    def list_reply_stores(self):
        """returns list of unique reply stores"""
        names = []
        stores = []
        for i in self.reply_stores:
            if not i[1] in stores:
                names.append(i[1].name)
                stores.append(i[1])
        return dict(zip(names, stores))  # I suppose names are unique

    def list_command_stores(self):
        """returns list of unique command stores"""
        names = []
        stores = []
        for i in self.command_stores:
            if not i[1] in stores and not (hasattr(i[1], 'internal') and i[1].internal):
                # do not list stores that are there already, and don't list those
                # that are "internal" either (particularly PingES)
                names.append(i[0])
                stores.append(i[1])
        return dict(zip([x.lower() for x in names], stores))

    def list_privmsg_stores(self, filter=None):
        """returns list of unique privmsg stores
        @param filter return only privmsg or channels if 'privmsg' or 'channel'"""
        d = {}
        for i in self.privmsg_stores:
            if filter == 'privmsg':
                if isinstance(i, events.PrivmsgStore):
                    d[i.target] = i
            elif filter == 'channel':
                if isinstance(i, events.ChannelStore):
                    d[i.target] = i
            else:
                d[i.target] = i
        return d

    def list_info_stores(self):
        """returns list of reply stores that don't take any commands,
        aka "informational" stores (errors, etc?)"""
        names = []
        stores = []
        for i in self.reply_stores:
            found = False
            for j in self.command_stores:
                if j[1] == i[1]:
                    found = True
            for j in self.privmsg_stores:
                if j == i[1]:
                    found = True
            if not found:
                names.append(i[1].name)
                stores.append(i[1])
        return dict(zip(names, stores))
|
import matplotlib.pyplot as plt
def diagram(list_: list, file: str = None) -> None:
    """
    Create a bar chart from *list_* and save it to *file* (or show it).

    :param list_: list - values to plot; bar i shows list_[i-1]
    :param file: str - path of the output image; if None, the figure is
                 shown interactively instead (the original default of None
                 crashed in plt.savefig)
    :return None
    """
    labels = [val for val in range(1, len(list_) + 1)]
    width = 0.35
    fig, ax = plt.subplots()
    ax.bar(labels, list_, width)
    ax.set_ylabel('Value')
    ax.set_title('Test diagram')
    # NOTE: the original ax.legend() call was removed — with no labeled
    # artists it drew nothing and only emitted a UserWarning.
    if file:
        plt.savefig(file)
    else:
        plt.show()
    # Close the figure so repeated calls don't accumulate open figures.
    plt.close(fig)
|
from django.shortcuts import render
def teste(request):
    """Render the static 'sobre/bla.html' template."""
    template_name = 'sobre/bla.html'
    return render(request, template_name)
|
import sys
sys.path.append('../')
import globals
def removeEndDigit(name):
    """Return *name* with any trailing digits removed (e.g. 'fred12' -> 'fred').

    FIX: the original used str.strip, which removes digits from BOTH ends,
    contradicting the function's name and its use for grouping node base
    names; rstrip removes only the trailing run of digits.
    """
    return name.rstrip("0123456789")
class outputHandler():
    """Generates Emulab/DETER NS topology output from the global node,
    constraint, link and lan state (see the globals module)."""
    constraints = {}
    mergedConstraints = {}
    message = ""
    heading = """#NS Output
set ns [new Simulator]
source tb_compat.tcl
"""
    footer = """$ns rtproto Session
$ns run"""

    def __init__(self):
        pass

    def produceOutput(self):
        """Regenerate the NS output and display it in the app's output box."""
        print("Called output handler!")
        self.produceNSOutput()
        try:
            globals.app.clearMessage("output")
            globals.app.setMessage("output", self.message)
            print(self.message)
        except Exception as e:
            # best-effort UI update: keep going even if the app widget is absent
            print(e)

    def expandConstraints(self):
        """Build self.constraints: one entry per node with its effective
        'os'/'nodetype'/'num' constraints."""
        self.constraints = {}
        # XXX This is a lot of extra work to redo the globals constraint dictionary
        # to include only the types we deal with for constraints right now and
        # to expand cases where the main node name (e.g. 'fred') has a constraint of 'num'
        # and deal with cases when there are specific constraints of some of these (e.g. an 'os' for 'fred1')
        for n in globals.nodes:
            self.constraints[n] = {}
            for type in ['os', 'nodetype', 'num']:
                if n in globals.constraints and type in globals.constraints[n]:
                    self.constraints[n][type] = globals.constraints[n][type]
                elif removeEndDigit(n) in globals.constraints and type in globals.constraints[removeEndDigit(n)]:
                    self.constraints[n][type] = globals.constraints[removeEndDigit(n)][type]

    def mergeConstraints(self):
        """Collapse nodes that share a base name and identical constraints
        into a single entry with a 'count'."""
        self.mergedConstraints = {}
        covered = []
        for n in self.constraints:
            numWSame = 1
            if n in covered:
                continue
            for x in self.constraints:
                if x in covered:
                    continue
                # Don't compare our self to our self (no point), and don't compare two things with different base names.
                if n != x and removeEndDigit(n) == removeEndDigit(x):
                    # If we find the same name-start + all the same constraints are the same, we can merge these.
                    sameConstraints = True
                    for type in self.constraints[n]:
                        if type not in self.constraints[x] or self.constraints[x][type] != self.constraints[n][type]:
                            sameConstraints = False
                    if sameConstraints:
                        numWSame = int(numWSame) + 1
                        covered.append(x)
            self.mergedConstraints[n] = {}
            self.mergedConstraints[n]['count'] = int(numWSame)
            for type in self.constraints[n]:
                if type != 'num':
                    self.mergedConstraints[n][type] = self.constraints[n][type]
            covered.append(n)

    def produceNSOutput(self):
        """Assemble the full NS script (nodes, links, lans) into self.message."""
        self.message = self.heading + '\n'
        self.expandConstraints()
        self.mergeConstraints()
        default_os = "Ubuntu1404-64-STD"
        default_type = "pc3000"
        # Define our nodes.
        print(self.mergedConstraints)
        for n in self.mergedConstraints:
            if 'count' in self.mergedConstraints[n] and int(self.mergedConstraints[n]['count']) > 1:
                name = "[format \"%s%%03d\" $i]" % (removeEndDigit(n))
                # Make a loop.
                self.message = self.message + "for {set i 1} {$i <= %d} {incr i} {" % int(self.mergedConstraints[n]['count']) + '\n'
                space = " "
            else:
                space = ""
                name = str(n)
            self.message = self.message + space + "set %s [$ns node]\n" % name
            if 'os' in self.mergedConstraints[n]:
                self.message = self.message + space + "set tb-set-node-os %s %s\n" % (name, self.mergedConstraints[n]['os'])
            else:
                self.message = self.message + space + "set tb-set-node-os %s %s\n" % (name, default_os)
            # FIX: expandConstraints stores the hardware constraint under
            # 'nodetype', but this check used 'type', so the branch could
            # never fire and the default hardware was always emitted.
            if 'nodetype' in self.mergedConstraints[n]:
                self.message = self.message + space + "set tb-set-hardware %s %s\n" % (name, self.mergedConstraints[n]['nodetype'])
            else:
                self.message = self.message + space + "set tb-set-hardware %s %s\n" % (name, default_type)
            # Close the loop if we need to.
            if 'count' in self.mergedConstraints[n] and int(self.mergedConstraints[n]['count']) > 1:
                self.message = self.message + "}\n"
        # Handle links
        for a in globals.links:
            for b in globals.links[a]:
                # FIX: the original line had no trailing newline, so successive
                # link/lan/footer lines were glued together into invalid NS.
                self.message = self.message + "set link%s-%s [$ns duplex-link $%s $%s 1000Mb 0.0ms DropTail]\n" % (a, b, a, b)
        # Handle lan
        for l in globals.lans:
            self.message = self.message + "set %s [$ns make-lan " % l
            for i in globals.lans[l]:
                self.message = self.message + "$" + i + " "
            self.message = self.message + "1Mb 0ms]\n"
        self.message = self.message + self.footer

    def save(self, filename):
        """Regenerate the output and write it to *filename*."""
        self.produceOutput()
        print("Called save.")
        try:
            with open(filename, "w") as text_file:
                text_file.write(self.message)
        except Exception as e:
            globals.app.infoBox('Problem saving', 'There was an error saving %s: %s' % (filename, e), parent=None)
|
import pytest
from py42._internal.client_factories import MicroserviceClientFactory
from py42._internal.clients.alertrules import AlertRulesClient
from py42._internal.clients.alerts import AlertClient
from py42.modules.alertrules import AlertRulesModule
@pytest.fixture
def mock_microservice_client_factory(mocker):
    """Provide a MagicMock standing in for the microservice client factory."""
    factory = mocker.MagicMock(spec=MicroserviceClientFactory)
    return factory
@pytest.fixture
def mock_alert_rules_client(mocker):
    """Provide a MagicMock standing in for the alert-rules client."""
    client = mocker.MagicMock(spec=AlertRulesClient)
    return client
@pytest.fixture
def mock_alerts_client(mocker):
    """Provide a MagicMock standing in for the alerts client."""
    client = mocker.MagicMock(spec=AlertClient)
    return client
class TestAlertRulesModules(object):
    """Verify that AlertRulesModule forwards each call, with its arguments
    intact, to the appropriate underlying microservice client."""

    _rule_id = u"test-rule-id"
    _user_id = u"test-user-uid"

    def test_alert_rules_module_calls_add_user_with_expected_value(
        self, mock_microservice_client_factory, mock_alert_rules_client
    ):
        mock_microservice_client_factory.get_alert_rules_client.return_value = (
            mock_alert_rules_client
        )
        alert_rules_module = AlertRulesModule(mock_microservice_client_factory)
        # BUG FIX: the original passed _rule_id for BOTH parameters, leaving
        # the _user_id constant unused and never checking that a distinct
        # user id is forwarded in the correct position.
        alert_rules_module.add_user(self._rule_id, self._user_id)
        mock_alert_rules_client.add_user.assert_called_once_with(
            self._rule_id, self._user_id
        )

    def test_alert_rules_module_calls_remove_user_with_expected_value(
        self, mock_microservice_client_factory, mock_alert_rules_client
    ):
        mock_microservice_client_factory.get_alert_rules_client.return_value = (
            mock_alert_rules_client
        )
        alert_rules_module = AlertRulesModule(mock_microservice_client_factory)
        # BUG FIX: same _rule_id/_user_id mix-up as in the add_user test.
        alert_rules_module.remove_user(self._rule_id, self._user_id)
        mock_alert_rules_client.remove_user.assert_called_once_with(
            self._rule_id, self._user_id
        )

    def test_alert_rules_module_calls_remove_all_users_with_expected_value(
        self, mock_microservice_client_factory, mock_alert_rules_client
    ):
        mock_microservice_client_factory.get_alert_rules_client.return_value = (
            mock_alert_rules_client
        )
        alert_rules_module = AlertRulesModule(mock_microservice_client_factory)
        alert_rules_module.remove_all_users(self._rule_id)
        mock_alert_rules_client.remove_all_users.assert_called_once_with(self._rule_id)

    def test_alert_rules_module_calls_get_all_with_expected_value(
        self, mock_microservice_client_factory, mock_alerts_client
    ):
        mock_microservice_client_factory.get_alerts_client.return_value = (
            mock_alerts_client
        )
        alert_rules_module = AlertRulesModule(mock_microservice_client_factory)
        alert_rules_module.get_all()
        assert mock_alerts_client.get_all_rules.call_count == 1

    def test_alert_rules_module_calls_get_all_by_name_with_expected_value(
        self, mock_microservice_client_factory, mock_alerts_client
    ):
        rule_name = u"test rule"
        mock_microservice_client_factory.get_alerts_client.return_value = (
            mock_alerts_client
        )
        alert_rules_module = AlertRulesModule(mock_microservice_client_factory)
        alert_rules_module.get_all_by_name(rule_name)
        mock_alerts_client.get_all_rules_by_name.assert_called_once_with(rule_name)

    def test_alert_rules_module_calls_get_rules_by_observer_id_with_expected_value(
        self, mock_microservice_client_factory, mock_alerts_client
    ):
        rule_id = u"test-rule-id"
        mock_microservice_client_factory.get_alerts_client.return_value = (
            mock_alerts_client
        )
        alert_rules_module = AlertRulesModule(mock_microservice_client_factory)
        alert_rules_module.get_by_observer_id(rule_id)
        mock_alerts_client.get_rule_by_observer_id.assert_called_once_with(rule_id)

    def test_alert_rules_module_calls_get_rules_page_with_expected_params(
        self, mock_microservice_client_factory, mock_alerts_client
    ):
        mock_microservice_client_factory.get_alerts_client.return_value = (
            mock_alerts_client
        )
        alert_rules_module = AlertRulesModule(mock_microservice_client_factory)
        alert_rules_module.get_page("key", "dir", 70, 700)
        # Positional call is expected to be forwarded as keyword arguments.
        mock_alerts_client.get_rules_page.assert_called_once_with(
            sort_key="key", sort_direction="dir", page_num=70, page_size=700
        )
|
from __future__ import annotations
from unittest import TestCase
from jsonclasses.exceptions import ValidationException
from tests.classes.enum_user import Gender, EnumUser
from tests.classes.value_gender_user import ValueGender, ValueGenderUser
from tests.classes.lname_gender_user import LnameGender, LnameGenderUser
class TestEnum(TestCase):
    """Exercise enum assignment, validation, and JSON serialization
    behavior for the default, value-based, and lowercase-name variants."""

    def test_enum_is_enum_after_assigned(self):
        user = EnumUser(name='Kiên Kiong', gender=Gender.MALE)
        expected = {'name': 'Kiên Kiong', 'gender': Gender.MALE}
        self.assertEqual(user._data_dict, expected)

    def test_enum_assign_raises_if_value_is_not_valid_enum_value(self):
        with self.assertRaises(ValidationException) as cm:
            EnumUser(name='Kiên Kiong', gender=8)
        message = cm.exception.keypath_messages['gender']
        self.assertEqual(message, "unknown enum value")

    def test_enum_accepts_uppercase_name_on_assign_by_default(self):
        user = EnumUser(name='Kiên Kiong', gender='MALE')
        expected = {'name': 'Kiên Kiong', 'gender': Gender.MALE}
        self.assertEqual(user._data_dict, expected)

    def test_enum_does_not_accept_lowercase_name_on_assign_by_default(self):
        with self.assertRaises(ValidationException) as cm:
            EnumUser(name='Kiên Kiong', gender='male')
        message = cm.exception.keypath_messages['gender']
        self.assertEqual(message, "unknown enum value")

    def test_enum_raises_if_value_is_not_valid_enum_value(self):
        user = EnumUser(name='Kiên Kiong', gender='MALE')
        # An out-of-range raw value sneaks past assignment but must fail
        # full validation.
        user.gender = 6
        with self.assertRaises(ValidationException) as cm:
            user.validate()
        message = cm.exception.keypath_messages['gender']
        self.assertEqual(message, "invalid enum value")

    def test_enum_raises_if_value_is_not_the_same_enum(self):
        user = EnumUser(name='Kiên Kiong', gender='MALE')
        # A member of a *different* enum class is not acceptable either.
        user.gender = LnameGender.MALE
        with self.assertRaises(ValidationException) as cm:
            user.validate()
        message = cm.exception.keypath_messages['gender']
        self.assertEqual(message, "invalid enum value")

    def test_enum_outputs_to_uppercase_name_by_default(self):
        user = EnumUser(name='Nng Li', gender=Gender.MALE)
        expected = {'name': 'Nng Li', 'gender': 'MALE'}
        self.assertEqual(user.tojson(), expected)

    def test_enum_accept_value_if_specified(self):
        user = ValueGenderUser(name='Mia', gender=1)
        expected = {'name': 'Mia', 'gender': ValueGender.MALE}
        self.assertEqual(user._data_dict, expected)

    def test_enum_accept_lowercase_name_if_specified(self):
        user = LnameGenderUser(name='Mia', gender='male')
        expected = {'name': 'Mia', 'gender': LnameGender.MALE}
        self.assertEqual(user._data_dict, expected)

    def test_enum_outputs_to_value_if_specified(self):
        user = ValueGenderUser(name='Nng Li', gender=ValueGender.MALE)
        expected = {'name': 'Nng Li', 'gender': 1}
        self.assertEqual(user.tojson(), expected)

    def test_enum_outputs_to_lowercase_name_if_specified(self):
        user = LnameGenderUser(name='Nng Li', gender=LnameGender.MALE)
        expected = {'name': 'Nng Li', 'gender': 'male'}
        self.assertEqual(user.tojson(), expected)
|
from abc import ABC, abstractmethod
from typing import List
from github import InputGitTreeElement
import discord
from . import format_model
class GitTreeElementCreator(ABC):
    """
    Abstract base class for converting Discord models into
    InputGitTreeElement instances, one file entry per model, for
    committing to a GitHub tree.
    """
    @abstractmethod
    def create_channel_element(self, channel: discord.abc.GuildChannel) -> InputGitTreeElement:
        """
        Extract the server-configuration-relevant data from a guild channel
        and build an InputGitTreeElement for it.
        """
        pass
    @abstractmethod
    def create_role_element(self, role: discord.Role) -> InputGitTreeElement:
        """
        Extract the server-configuration-relevant data from a guild role
        and build an InputGitTreeElement for it.
        """
        pass
    @abstractmethod
    def create_member_element(self, member: discord.Member) -> InputGitTreeElement:
        """
        Extract the server-configuration-relevant data from a guild member
        and build an InputGitTreeElement for it.
        """
        pass
    @abstractmethod
    def create_guild_element(self, guild: discord.Guild) -> InputGitTreeElement:
        """
        Extract the server-configuration-relevant data from the guild itself
        and build an InputGitTreeElement for it.

        NOTE(review): the original docstring said "member" here, which looks
        like a copy-paste slip — the signature takes the guild.
        """
        pass
class DefaultTreeCreator(GitTreeElementCreator):
    """
    Default GitTreeElementCreator implementation: serializes each Discord
    model with a pluggable formatter and wraps the result in a regular-file
    ("100644") blob entry.
    """

    def __init__(self, **kwargs):
        user_formatter = kwargs.get("formatter")
        if user_formatter is None:
            self.formatter = format_model.DefaultFormatter()
        elif isinstance(user_formatter, format_model.BaseFormatter):
            self.formatter = user_formatter
        else:
            # NOTE(review): TypeError would be the conventional exception for
            # a bad argument type; NotImplementedError is kept unchanged so
            # existing callers that catch it keep working.
            raise NotImplementedError(
                "formatter は format_model.BaseFormatter を実装している必要があります。"
            )

    def _blob_element(self, file_path: str, content: str) -> InputGitTreeElement:
        # Every generated entry is a regular-file blob; centralize the mode
        # and type strings here.
        return InputGitTreeElement(
            file_path,
            "100644",
            "blob",
            content=content
        )

    def create_channel_element(self, channel: discord.abc.GuildChannel) -> InputGitTreeElement:
        # Channels that belong to a category are grouped in a subdirectory
        # named after the category id.
        if channel.category_id is None:
            file_path = f"channels/{channel.id}.json"
        else:
            file_path = f"channels/{channel.category_id}/{channel.id}.json"
        return self._blob_element(file_path, self.formatter.format_channel(channel))

    def create_member_element(self, member: discord.Member) -> InputGitTreeElement:
        return self._blob_element(
            f"members/{member.id}.json",
            self.formatter.format_member(member)
        )

    def create_role_element(self, role: discord.Role) -> InputGitTreeElement:
        return self._blob_element(
            f"roles/{role.id}.json",
            self.formatter.format_role(role)
        )

    def create_guild_element(self, guild: discord.Guild) -> InputGitTreeElement:
        return self._blob_element(
            "guild_config.json",
            self.formatter.format_guild(guild)
        )

    def create_index_element(self, index_content) -> InputGitTreeElement:
        # The index page content is supplied pre-rendered by the caller.
        return self._blob_element("index.md", index_content)
|
#
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
# --- Auto-generated code: don't edit this file ---
#
# Unicode data version 12.1.0
#
RAW_UNICODE_CATEGORIES = {
'C': [(0, 32), (127, 160), 173, (888, 890), (896, 900), 907, 909, 930, 1328,
(1367, 1369), (1419, 1421), 1424, (1480, 1488), (1515, 1519),
(1525, 1542), (1564, 1566), 1757, (1806, 1808), (1867, 1869),
(1970, 1984), (2043, 2045), (2094, 2096), 2111, (2140, 2142), 2143,
(2155, 2208), 2229, (2238, 2259), 2274, 2436, (2445, 2447), (2449, 2451),
2473, 2481, (2483, 2486), (2490, 2492), (2501, 2503), (2505, 2507),
(2511, 2519), (2520, 2524), 2526, (2532, 2534), (2559, 2561), 2564,
(2571, 2575), (2577, 2579), 2601, 2609, 2612, 2615, (2618, 2620), 2621,
(2627, 2631), (2633, 2635), (2638, 2641), (2642, 2649), 2653,
(2655, 2662), (2679, 2689), 2692, 2702, 2706, 2729, 2737, 2740,
(2746, 2748), 2758, 2762, (2766, 2768), (2769, 2784), (2788, 2790),
(2802, 2809), 2816, 2820, (2829, 2831), (2833, 2835), 2857, 2865, 2868,
(2874, 2876), (2885, 2887), (2889, 2891), (2894, 2902), (2904, 2908),
2910, (2916, 2918), (2936, 2946), 2948, (2955, 2958), 2961, (2966, 2969),
2971, 2973, (2976, 2979), (2981, 2984), (2987, 2990), (3002, 3006),
(3011, 3014), 3017, (3022, 3024), (3025, 3031), (3032, 3046),
(3067, 3072), 3085, 3089, 3113, (3130, 3133), 3141, 3145, (3150, 3157),
3159, (3163, 3168), (3172, 3174), (3184, 3191), 3213, 3217, 3241, 3252,
(3258, 3260), 3269, 3273, (3278, 3285), (3287, 3294), 3295, (3300, 3302),
3312, (3315, 3328), 3332, 3341, 3345, 3397, 3401, (3408, 3412),
(3428, 3430), (3456, 3458), 3460, (3479, 3482), 3506, 3516, (3518, 3520),
(3527, 3530), (3531, 3535), 3541, 3543, (3552, 3558), (3568, 3570),
(3573, 3585), (3643, 3647), (3676, 3713), 3715, 3717, 3723, 3748, 3750,
(3774, 3776), 3781, 3783, (3790, 3792), (3802, 3804), (3808, 3840), 3912,
(3949, 3953), 3992, 4029, 4045, (4059, 4096), 4294, (4296, 4301),
(4302, 4304), 4681, (4686, 4688), 4695, 4697, (4702, 4704), 4745,
(4750, 4752), 4785, (4790, 4792), 4799, 4801, (4806, 4808), 4823, 4881,
(4886, 4888), (4955, 4957), (4989, 4992), (5018, 5024), (5110, 5112),
(5118, 5120), (5789, 5792), (5881, 5888), 5901, (5909, 5920),
(5943, 5952), (5972, 5984), 5997, 6001, (6004, 6016), (6110, 6112),
(6122, 6128), (6138, 6144), (6158, 6160), (6170, 6176), (6265, 6272),
(6315, 6320), (6390, 6400), 6431, (6444, 6448), (6460, 6464),
(6465, 6468), (6510, 6512), (6517, 6528), (6572, 6576), (6602, 6608),
(6619, 6622), (6684, 6686), 6751, (6781, 6783), (6794, 6800),
(6810, 6816), (6830, 6832), (6847, 6912), (6988, 6992), (7037, 7040),
(7156, 7164), (7224, 7227), (7242, 7245), (7305, 7312), (7355, 7357),
(7368, 7376), (7419, 7424), 7674, (7958, 7960), (7966, 7968),
(8006, 8008), (8014, 8016), 8024, 8026, 8028, 8030, (8062, 8064), 8117,
8133, (8148, 8150), 8156, (8176, 8178), 8181, 8191, (8203, 8208),
(8234, 8239), (8288, 8304), (8306, 8308), 8335, (8349, 8352),
(8384, 8400), (8433, 8448), (8588, 8592), (9255, 9280), (9291, 9312),
(11124, 11126), (11158, 11160), 11311, 11359, (11508, 11513), 11558,
(11560, 11565), (11566, 11568), (11624, 11631), (11633, 11647),
(11671, 11680), 11687, 11695, 11703, 11711, 11719, 11727, 11735, 11743,
(11856, 11904), 11930, (12020, 12032), (12246, 12272), (12284, 12288),
12352, (12439, 12441), (12544, 12549), 12592, 12687, (12731, 12736),
(12772, 12784), 12831, (19894, 19904), (40944, 40960), (42125, 42128),
(42183, 42192), (42540, 42560), (42744, 42752), (42944, 42946),
(42951, 42999), (43052, 43056), (43066, 43072), (43128, 43136),
(43206, 43214), (43226, 43232), (43348, 43359), (43389, 43392), 43470,
(43482, 43486), 43519, (43575, 43584), (43598, 43600), (43610, 43612),
(43715, 43739), (43767, 43777), (43783, 43785), (43791, 43793),
(43799, 43808), 43815, 43823, (43880, 43888), (44014, 44016),
(44026, 44032), (55204, 55216), (55239, 55243), (55292, 63744),
(64110, 64112), (64218, 64256), (64263, 64275), (64280, 64285), 64311,
64317, 64319, 64322, 64325, (64450, 64467), (64832, 64848),
(64912, 64914), (64968, 65008), (65022, 65024), (65050, 65056), 65107,
65127, (65132, 65136), 65141, (65277, 65281), (65471, 65474),
(65480, 65482), (65488, 65490), (65496, 65498), (65501, 65504), 65511,
(65519, 65532), (65534, 65536), 65548, 65575, 65595, 65598,
(65614, 65616), (65630, 65664), (65787, 65792), (65795, 65799),
(65844, 65847), 65935, (65948, 65952), (65953, 66000), (66046, 66176),
(66205, 66208), (66257, 66272), (66300, 66304), (66340, 66349),
(66379, 66384), (66427, 66432), 66462, (66500, 66504), (66518, 66560),
(66718, 66720), (66730, 66736), (66772, 66776), (66812, 66816),
(66856, 66864), (66916, 66927), (66928, 67072), (67383, 67392),
(67414, 67424), (67432, 67584), (67590, 67592), 67593, 67638,
(67641, 67644), (67645, 67647), 67670, (67743, 67751), (67760, 67808),
67827, (67830, 67835), (67868, 67871), (67898, 67903), (67904, 67968),
(68024, 68028), (68048, 68050), 68100, (68103, 68108), 68116, 68120,
(68150, 68152), (68155, 68159), (68169, 68176), (68185, 68192),
(68256, 68288), (68327, 68331), (68343, 68352), (68406, 68409),
(68438, 68440), (68467, 68472), (68498, 68505), (68509, 68521),
(68528, 68608), (68681, 68736), (68787, 68800), (68851, 68858),
(68904, 68912), (68922, 69216), (69247, 69376), (69416, 69424),
(69466, 69600), (69623, 69632), (69710, 69714), (69744, 69759), 69821,
(69826, 69840), (69865, 69872), (69882, 69888), 69941, (69959, 69968),
(70007, 70016), (70094, 70096), 70112, (70133, 70144), 70162,
(70207, 70272), 70279, 70281, 70286, 70302, (70314, 70320),
(70379, 70384), (70394, 70400), 70404, (70413, 70415), (70417, 70419),
70441, 70449, 70452, 70458, (70469, 70471), (70473, 70475),
(70478, 70480), (70481, 70487), (70488, 70493), (70500, 70502),
(70509, 70512), (70517, 70656), 70746, 70748, (70752, 70784),
(70856, 70864), (70874, 71040), (71094, 71096), (71134, 71168),
(71237, 71248), (71258, 71264), (71277, 71296), (71353, 71360),
(71370, 71424), (71451, 71453), (71468, 71472), (71488, 71680),
(71740, 71840), (71923, 71935), (71936, 72096), (72104, 72106),
(72152, 72154), (72165, 72192), (72264, 72272), (72355, 72384),
(72441, 72704), 72713, 72759, (72774, 72784), (72813, 72816),
(72848, 72850), 72872, (72887, 72960), 72967, 72970, (73015, 73018),
73019, 73022, (73032, 73040), (73050, 73056), 73062, 73065, 73103, 73106,
(73113, 73120), (73130, 73440), (73465, 73664), (73714, 73727),
(74650, 74752), 74863, (74869, 74880), (75076, 77824), (78895, 82944),
(83527, 92160), (92729, 92736), 92767, (92778, 92782), (92784, 92880),
(92910, 92912), (92918, 92928), (92998, 93008), 93018, 93026,
(93048, 93053), (93072, 93760), (93851, 93952), (94027, 94031),
(94088, 94095), (94112, 94176), (94180, 94208), (100344, 100352),
(101107, 110592), (110879, 110928), (110931, 110948), (110952, 110960),
(111356, 113664), (113771, 113776), (113789, 113792), (113801, 113808),
(113818, 113820), (113824, 118784), (119030, 119040), (119079, 119081),
(119155, 119163), (119273, 119296), (119366, 119520), (119540, 119552),
(119639, 119648), (119673, 119808), 119893, 119965, (119968, 119970),
(119971, 119973), (119975, 119977), 119981, 119994, 119996, 120004,
120070, (120075, 120077), 120085, 120093, 120122, 120127, 120133,
(120135, 120138), 120145, (120486, 120488), (120780, 120782),
(121484, 121499), 121504, (121520, 122880), 122887, (122905, 122907),
122914, 122917, (122923, 123136), (123181, 123184), (123198, 123200),
(123210, 123214), (123216, 123584), (123642, 123647), (123648, 124928),
(125125, 125127), (125143, 125184), (125260, 125264), (125274, 125278),
(125280, 126065), (126133, 126209), (126270, 126464), 126468, 126496,
126499, (126501, 126503), 126504, 126515, 126520, 126522,
(126524, 126530), (126531, 126535), 126536, 126538, 126540, 126544,
126547, (126549, 126551), 126552, 126554, 126556, 126558, 126560, 126563,
(126565, 126567), 126571, 126579, 126584, 126589, 126591, 126602,
(126620, 126625), 126628, 126634, (126652, 126704), (126706, 126976),
(127020, 127024), (127124, 127136), (127151, 127153), 127168, 127184,
(127222, 127232), (127245, 127248), (127341, 127344), (127405, 127462),
(127491, 127504), (127548, 127552), (127561, 127568), (127570, 127584),
(127590, 127744), (128726, 128736), (128749, 128752), (128763, 128768),
(128884, 128896), (128985, 128992), (129004, 129024), (129036, 129040),
(129096, 129104), (129114, 129120), (129160, 129168), (129198, 129280),
129292, 129394, (129399, 129402), (129443, 129445), (129451, 129454),
(129483, 129485), (129620, 129632), (129646, 129648), (129652, 129656),
(129659, 129664), (129667, 129680), (129686, 131072), (173783, 173824),
(177973, 177984), (178206, 178208), (183970, 183984), (191457, 194560),
(195102, 917760), (918000, 1114112)],
'Cc': [(0, 32), (127, 160)],
'Cf': [173, (1536, 1542), 1564, 1757, 1807, 2274, 6158, (8203, 8208),
(8234, 8239), (8288, 8293), (8294, 8304), 65279, (65529, 65532), 69821,
69837, (78896, 78905), (113824, 113828), (119155, 119163), 917505,
(917536, 917632)],
'Cn': [(888, 890), (896, 900), 907, 909, 930, 1328, (1367, 1369), (1419, 1421),
1424, (1480, 1488), (1515, 1519), (1525, 1536), 1565, 1806,
(1867, 1869), (1970, 1984), (2043, 2045), (2094, 2096), 2111,
(2140, 2142), 2143, (2155, 2208), 2229, (2238, 2259), 2436,
(2445, 2447), (2449, 2451), 2473, 2481, (2483, 2486), (2490, 2492),
(2501, 2503), (2505, 2507), (2511, 2519), (2520, 2524), 2526,
(2532, 2534), (2559, 2561), 2564, (2571, 2575), (2577, 2579), 2601,
2609, 2612, 2615, (2618, 2620), 2621, (2627, 2631), (2633, 2635),
(2638, 2641), (2642, 2649), 2653, (2655, 2662), (2679, 2689), 2692,
2702, 2706, 2729, 2737, 2740, (2746, 2748), 2758, 2762, (2766, 2768),
(2769, 2784), (2788, 2790), (2802, 2809), 2816, 2820, (2829, 2831),
(2833, 2835), 2857, 2865, 2868, (2874, 2876), (2885, 2887),
(2889, 2891), (2894, 2902), (2904, 2908), 2910, (2916, 2918),
(2936, 2946), 2948, (2955, 2958), 2961, (2966, 2969), 2971, 2973,
(2976, 2979), (2981, 2984), (2987, 2990), (3002, 3006), (3011, 3014),
3017, (3022, 3024), (3025, 3031), (3032, 3046), (3067, 3072), 3085,
3089, 3113, (3130, 3133), 3141, 3145, (3150, 3157), 3159, (3163, 3168),
(3172, 3174), (3184, 3191), 3213, 3217, 3241, 3252, (3258, 3260), 3269,
3273, (3278, 3285), (3287, 3294), 3295, (3300, 3302), 3312,
(3315, 3328), 3332, 3341, 3345, 3397, 3401, (3408, 3412), (3428, 3430),
(3456, 3458), 3460, (3479, 3482), 3506, 3516, (3518, 3520),
(3527, 3530), (3531, 3535), 3541, 3543, (3552, 3558), (3568, 3570),
(3573, 3585), (3643, 3647), (3676, 3713), 3715, 3717, 3723, 3748, 3750,
(3774, 3776), 3781, 3783, (3790, 3792), (3802, 3804), (3808, 3840),
3912, (3949, 3953), 3992, 4029, 4045, (4059, 4096), 4294, (4296, 4301),
(4302, 4304), 4681, (4686, 4688), 4695, 4697, (4702, 4704), 4745,
(4750, 4752), 4785, (4790, 4792), 4799, 4801, (4806, 4808), 4823, 4881,
(4886, 4888), (4955, 4957), (4989, 4992), (5018, 5024), (5110, 5112),
(5118, 5120), (5789, 5792), (5881, 5888), 5901, (5909, 5920),
(5943, 5952), (5972, 5984), 5997, 6001, (6004, 6016), (6110, 6112),
(6122, 6128), (6138, 6144), 6159, (6170, 6176), (6265, 6272),
(6315, 6320), (6390, 6400), 6431, (6444, 6448), (6460, 6464),
(6465, 6468), (6510, 6512), (6517, 6528), (6572, 6576), (6602, 6608),
(6619, 6622), (6684, 6686), 6751, (6781, 6783), (6794, 6800),
(6810, 6816), (6830, 6832), (6847, 6912), (6988, 6992), (7037, 7040),
(7156, 7164), (7224, 7227), (7242, 7245), (7305, 7312), (7355, 7357),
(7368, 7376), (7419, 7424), 7674, (7958, 7960), (7966, 7968),
(8006, 8008), (8014, 8016), 8024, 8026, 8028, 8030, (8062, 8064), 8117,
8133, (8148, 8150), 8156, (8176, 8178), 8181, 8191, 8293, (8306, 8308),
8335, (8349, 8352), (8384, 8400), (8433, 8448), (8588, 8592),
(9255, 9280), (9291, 9312), (11124, 11126), (11158, 11160), 11311,
11359, (11508, 11513), 11558, (11560, 11565), (11566, 11568),
(11624, 11631), (11633, 11647), (11671, 11680), 11687, 11695, 11703,
11711, 11719, 11727, 11735, 11743, (11856, 11904), 11930,
(12020, 12032), (12246, 12272), (12284, 12288), 12352, (12439, 12441),
(12544, 12549), 12592, 12687, (12731, 12736), (12772, 12784), 12831,
(19894, 19904), (40944, 40960), (42125, 42128), (42183, 42192),
(42540, 42560), (42744, 42752), (42944, 42946), (42951, 42999),
(43052, 43056), (43066, 43072), (43128, 43136), (43206, 43214),
(43226, 43232), (43348, 43359), (43389, 43392), 43470, (43482, 43486),
43519, (43575, 43584), (43598, 43600), (43610, 43612), (43715, 43739),
(43767, 43777), (43783, 43785), (43791, 43793), (43799, 43808), 43815,
43823, (43880, 43888), (44014, 44016), (44026, 44032), (55204, 55216),
(55239, 55243), (55292, 55296), (64110, 64112), (64218, 64256),
(64263, 64275), (64280, 64285), 64311, 64317, 64319, 64322, 64325,
(64450, 64467), (64832, 64848), (64912, 64914), (64968, 65008),
(65022, 65024), (65050, 65056), 65107, 65127, (65132, 65136), 65141,
(65277, 65279), 65280, (65471, 65474), (65480, 65482), (65488, 65490),
(65496, 65498), (65501, 65504), 65511, (65519, 65529), (65534, 65536),
65548, 65575, 65595, 65598, (65614, 65616), (65630, 65664),
(65787, 65792), (65795, 65799), (65844, 65847), 65935, (65948, 65952),
(65953, 66000), (66046, 66176), (66205, 66208), (66257, 66272),
(66300, 66304), (66340, 66349), (66379, 66384), (66427, 66432), 66462,
(66500, 66504), (66518, 66560), (66718, 66720), (66730, 66736),
(66772, 66776), (66812, 66816), (66856, 66864), (66916, 66927),
(66928, 67072), (67383, 67392), (67414, 67424), (67432, 67584),
(67590, 67592), 67593, 67638, (67641, 67644), (67645, 67647), 67670,
(67743, 67751), (67760, 67808), 67827, (67830, 67835), (67868, 67871),
(67898, 67903), (67904, 67968), (68024, 68028), (68048, 68050), 68100,
(68103, 68108), 68116, 68120, (68150, 68152), (68155, 68159),
(68169, 68176), (68185, 68192), (68256, 68288), (68327, 68331),
(68343, 68352), (68406, 68409), (68438, 68440), (68467, 68472),
(68498, 68505), (68509, 68521), (68528, 68608), (68681, 68736),
(68787, 68800), (68851, 68858), (68904, 68912), (68922, 69216),
(69247, 69376), (69416, 69424), (69466, 69600), (69623, 69632),
(69710, 69714), (69744, 69759), (69826, 69837), (69838, 69840),
(69865, 69872), (69882, 69888), 69941, (69959, 69968), (70007, 70016),
(70094, 70096), 70112, (70133, 70144), 70162, (70207, 70272), 70279,
70281, 70286, 70302, (70314, 70320), (70379, 70384), (70394, 70400),
70404, (70413, 70415), (70417, 70419), 70441, 70449, 70452, 70458,
(70469, 70471), (70473, 70475), (70478, 70480), (70481, 70487),
(70488, 70493), (70500, 70502), (70509, 70512), (70517, 70656), 70746,
70748, (70752, 70784), (70856, 70864), (70874, 71040), (71094, 71096),
(71134, 71168), (71237, 71248), (71258, 71264), (71277, 71296),
(71353, 71360), (71370, 71424), (71451, 71453), (71468, 71472),
(71488, 71680), (71740, 71840), (71923, 71935), (71936, 72096),
(72104, 72106), (72152, 72154), (72165, 72192), (72264, 72272),
(72355, 72384), (72441, 72704), 72713, 72759, (72774, 72784),
(72813, 72816), (72848, 72850), 72872, (72887, 72960), 72967, 72970,
(73015, 73018), 73019, 73022, (73032, 73040), (73050, 73056), 73062,
73065, 73103, 73106, (73113, 73120), (73130, 73440), (73465, 73664),
(73714, 73727), (74650, 74752), 74863, (74869, 74880), (75076, 77824),
78895, (78905, 82944), (83527, 92160), (92729, 92736), 92767,
(92778, 92782), (92784, 92880), (92910, 92912), (92918, 92928),
(92998, 93008), 93018, 93026, (93048, 93053), (93072, 93760),
(93851, 93952), (94027, 94031), (94088, 94095), (94112, 94176),
(94180, 94208), (100344, 100352), (101107, 110592), (110879, 110928),
(110931, 110948), (110952, 110960), (111356, 113664), (113771, 113776),
(113789, 113792), (113801, 113808), (113818, 113820), (113828, 118784),
(119030, 119040), (119079, 119081), (119273, 119296), (119366, 119520),
(119540, 119552), (119639, 119648), (119673, 119808), 119893, 119965,
(119968, 119970), (119971, 119973), (119975, 119977), 119981, 119994,
119996, 120004, 120070, (120075, 120077), 120085, 120093, 120122,
120127, 120133, (120135, 120138), 120145, (120486, 120488),
(120780, 120782), (121484, 121499), 121504, (121520, 122880), 122887,
(122905, 122907), 122914, 122917, (122923, 123136), (123181, 123184),
(123198, 123200), (123210, 123214), (123216, 123584), (123642, 123647),
(123648, 124928), (125125, 125127), (125143, 125184), (125260, 125264),
(125274, 125278), (125280, 126065), (126133, 126209), (126270, 126464),
126468, 126496, 126499, (126501, 126503), 126504, 126515, 126520,
126522, (126524, 126530), (126531, 126535), 126536, 126538, 126540,
126544, 126547, (126549, 126551), 126552, 126554, 126556, 126558,
126560, 126563, (126565, 126567), 126571, 126579, 126584, 126589,
126591, 126602, (126620, 126625), 126628, 126634, (126652, 126704),
(126706, 126976), (127020, 127024), (127124, 127136), (127151, 127153),
127168, 127184, (127222, 127232), (127245, 127248), (127341, 127344),
(127405, 127462), (127491, 127504), (127548, 127552), (127561, 127568),
(127570, 127584), (127590, 127744), (128726, 128736), (128749, 128752),
(128763, 128768), (128884, 128896), (128985, 128992), (129004, 129024),
(129036, 129040), (129096, 129104), (129114, 129120), (129160, 129168),
(129198, 129280), 129292, 129394, (129399, 129402), (129443, 129445),
(129451, 129454), (129483, 129485), (129620, 129632), (129646, 129648),
(129652, 129656), (129659, 129664), (129667, 129680), (129686, 131072),
(173783, 173824), (177973, 177984), (178206, 178208), (183970, 183984),
(191457, 194560), (195102, 917505), (917506, 917536), (917632, 917760),
(918000, 983040), (1048574, 1048576), (1114110, 1114112)],
'Co': [(57344, 63744), (983040, 1048574), (1048576, 1114110)],
'Cs': [(55296, 57344)],
'L': [(65, 91), (97, 123), 170, 181, 186, (192, 215), (216, 247), (248, 706),
(710, 722), (736, 741), 748, 750, (880, 885), (886, 888), (890, 894),
895, 902, (904, 907), 908, (910, 930), (931, 1014), (1015, 1154),
(1162, 1328), (1329, 1367), 1369, (1376, 1417), (1488, 1515),
(1519, 1523), (1568, 1611), (1646, 1648), (1649, 1748), 1749,
(1765, 1767), (1774, 1776), (1786, 1789), 1791, 1808, (1810, 1840),
(1869, 1958), 1969, (1994, 2027), (2036, 2038), 2042, (2048, 2070), 2074,
2084, 2088, (2112, 2137), (2144, 2155), (2208, 2229), (2230, 2238),
(2308, 2362), 2365, 2384, (2392, 2402), (2417, 2433), (2437, 2445),
(2447, 2449), (2451, 2473), (2474, 2481), 2482, (2486, 2490), 2493, 2510,
(2524, 2526), (2527, 2530), (2544, 2546), 2556, (2565, 2571),
(2575, 2577), (2579, 2601), (2602, 2609), (2610, 2612), (2613, 2615),
(2616, 2618), (2649, 2653), 2654, (2674, 2677), (2693, 2702),
(2703, 2706), (2707, 2729), (2730, 2737), (2738, 2740), (2741, 2746),
2749, 2768, (2784, 2786), 2809, (2821, 2829), (2831, 2833), (2835, 2857),
(2858, 2865), (2866, 2868), (2869, 2874), 2877, (2908, 2910),
(2911, 2914), 2929, 2947, (2949, 2955), (2958, 2961), (2962, 2966),
(2969, 2971), 2972, (2974, 2976), (2979, 2981), (2984, 2987),
(2990, 3002), 3024, (3077, 3085), (3086, 3089), (3090, 3113),
(3114, 3130), 3133, (3160, 3163), (3168, 3170), 3200, (3205, 3213),
(3214, 3217), (3218, 3241), (3242, 3252), (3253, 3258), 3261, 3294,
(3296, 3298), (3313, 3315), (3333, 3341), (3342, 3345), (3346, 3387),
3389, 3406, (3412, 3415), (3423, 3426), (3450, 3456), (3461, 3479),
(3482, 3506), (3507, 3516), 3517, (3520, 3527), (3585, 3633),
(3634, 3636), (3648, 3655), (3713, 3715), 3716, (3718, 3723),
(3724, 3748), 3749, (3751, 3761), (3762, 3764), 3773, (3776, 3781), 3782,
(3804, 3808), 3840, (3904, 3912), (3913, 3949), (3976, 3981),
(4096, 4139), 4159, (4176, 4182), (4186, 4190), 4193, (4197, 4199),
(4206, 4209), (4213, 4226), 4238, (4256, 4294), 4295, 4301, (4304, 4347),
(4348, 4681), (4682, 4686), (4688, 4695), 4696, (4698, 4702),
(4704, 4745), (4746, 4750), (4752, 4785), (4786, 4790), (4792, 4799),
4800, (4802, 4806), (4808, 4823), (4824, 4881), (4882, 4886),
(4888, 4955), (4992, 5008), (5024, 5110), (5112, 5118), (5121, 5741),
(5743, 5760), (5761, 5787), (5792, 5867), (5873, 5881), (5888, 5901),
(5902, 5906), (5920, 5938), (5952, 5970), (5984, 5997), (5998, 6001),
(6016, 6068), 6103, 6108, (6176, 6265), (6272, 6277), (6279, 6313), 6314,
(6320, 6390), (6400, 6431), (6480, 6510), (6512, 6517), (6528, 6572),
(6576, 6602), (6656, 6679), (6688, 6741), 6823, (6917, 6964),
(6981, 6988), (7043, 7073), (7086, 7088), (7098, 7142), (7168, 7204),
(7245, 7248), (7258, 7294), (7296, 7305), (7312, 7355), (7357, 7360),
(7401, 7405), (7406, 7412), (7413, 7415), 7418, (7424, 7616),
(7680, 7958), (7960, 7966), (7968, 8006), (8008, 8014), (8016, 8024),
8025, 8027, 8029, (8031, 8062), (8064, 8117), (8118, 8125), 8126,
(8130, 8133), (8134, 8141), (8144, 8148), (8150, 8156), (8160, 8173),
(8178, 8181), (8182, 8189), 8305, 8319, (8336, 8349), 8450, 8455,
(8458, 8468), 8469, (8473, 8478), 8484, 8486, 8488, (8490, 8494),
(8495, 8506), (8508, 8512), (8517, 8522), 8526, (8579, 8581),
(11264, 11311), (11312, 11359), (11360, 11493), (11499, 11503),
(11506, 11508), (11520, 11558), 11559, 11565, (11568, 11624), 11631,
(11648, 11671), (11680, 11687), (11688, 11695), (11696, 11703),
(11704, 11711), (11712, 11719), (11720, 11727), (11728, 11735),
(11736, 11743), 11823, (12293, 12295), (12337, 12342), (12347, 12349),
(12353, 12439), (12445, 12448), (12449, 12539), (12540, 12544),
(12549, 12592), (12593, 12687), (12704, 12731), (12784, 12800),
(13312, 19894), (19968, 40944), (40960, 42125), (42192, 42238),
(42240, 42509), (42512, 42528), (42538, 42540), (42560, 42607),
(42623, 42654), (42656, 42726), (42775, 42784), (42786, 42889),
(42891, 42944), (42946, 42951), (42999, 43010), (43011, 43014),
(43015, 43019), (43020, 43043), (43072, 43124), (43138, 43188),
(43250, 43256), 43259, (43261, 43263), (43274, 43302), (43312, 43335),
(43360, 43389), (43396, 43443), 43471, (43488, 43493), (43494, 43504),
(43514, 43519), (43520, 43561), (43584, 43587), (43588, 43596),
(43616, 43639), 43642, (43646, 43696), 43697, (43701, 43703),
(43705, 43710), 43712, 43714, (43739, 43742), (43744, 43755),
(43762, 43765), (43777, 43783), (43785, 43791), (43793, 43799),
(43808, 43815), (43816, 43823), (43824, 43867), (43868, 43880),
(43888, 44003), (44032, 55204), (55216, 55239), (55243, 55292),
(63744, 64110), (64112, 64218), (64256, 64263), (64275, 64280), 64285,
(64287, 64297), (64298, 64311), (64312, 64317), 64318, (64320, 64322),
(64323, 64325), (64326, 64434), (64467, 64830), (64848, 64912),
(64914, 64968), (65008, 65020), (65136, 65141), (65142, 65277),
(65313, 65339), (65345, 65371), (65382, 65471), (65474, 65480),
(65482, 65488), (65490, 65496), (65498, 65501), (65536, 65548),
(65549, 65575), (65576, 65595), (65596, 65598), (65599, 65614),
(65616, 65630), (65664, 65787), (66176, 66205), (66208, 66257),
(66304, 66336), (66349, 66369), (66370, 66378), (66384, 66422),
(66432, 66462), (66464, 66500), (66504, 66512), (66560, 66718),
(66736, 66772), (66776, 66812), (66816, 66856), (66864, 66916),
(67072, 67383), (67392, 67414), (67424, 67432), (67584, 67590), 67592,
(67594, 67638), (67639, 67641), 67644, (67647, 67670), (67680, 67703),
(67712, 67743), (67808, 67827), (67828, 67830), (67840, 67862),
(67872, 67898), (67968, 68024), (68030, 68032), 68096, (68112, 68116),
(68117, 68120), (68121, 68150), (68192, 68221), (68224, 68253),
(68288, 68296), (68297, 68325), (68352, 68406), (68416, 68438),
(68448, 68467), (68480, 68498), (68608, 68681), (68736, 68787),
(68800, 68851), (68864, 68900), (69376, 69405), 69415, (69424, 69446),
(69600, 69623), (69635, 69688), (69763, 69808), (69840, 69865),
(69891, 69927), 69956, (69968, 70003), 70006, (70019, 70067),
(70081, 70085), 70106, 70108, (70144, 70162), (70163, 70188),
(70272, 70279), 70280, (70282, 70286), (70287, 70302), (70303, 70313),
(70320, 70367), (70405, 70413), (70415, 70417), (70419, 70441),
(70442, 70449), (70450, 70452), (70453, 70458), 70461, 70480,
(70493, 70498), (70656, 70709), (70727, 70731), 70751, (70784, 70832),
(70852, 70854), 70855, (71040, 71087), (71128, 71132), (71168, 71216),
71236, (71296, 71339), 71352, (71424, 71451), (71680, 71724),
(71840, 71904), 71935, (72096, 72104), (72106, 72145), 72161, 72163,
72192, (72203, 72243), 72250, 72272, (72284, 72330), 72349,
(72384, 72441), (72704, 72713), (72714, 72751), 72768, (72818, 72848),
(72960, 72967), (72968, 72970), (72971, 73009), 73030, (73056, 73062),
(73063, 73065), (73066, 73098), 73112, (73440, 73459), (73728, 74650),
(74880, 75076), (77824, 78895), (82944, 83527), (92160, 92729),
(92736, 92767), (92880, 92910), (92928, 92976), (92992, 92996),
(93027, 93048), (93053, 93072), (93760, 93824), (93952, 94027), 94032,
(94099, 94112), (94176, 94178), 94179, (94208, 100344), (100352, 101107),
(110592, 110879), (110928, 110931), (110948, 110952), (110960, 111356),
(113664, 113771), (113776, 113789), (113792, 113801), (113808, 113818),
(119808, 119893), (119894, 119965), (119966, 119968), 119970,
(119973, 119975), (119977, 119981), (119982, 119994), 119995,
(119997, 120004), (120005, 120070), (120071, 120075), (120077, 120085),
(120086, 120093), (120094, 120122), (120123, 120127), (120128, 120133),
120134, (120138, 120145), (120146, 120486), (120488, 120513),
(120514, 120539), (120540, 120571), (120572, 120597), (120598, 120629),
(120630, 120655), (120656, 120687), (120688, 120713), (120714, 120745),
(120746, 120771), (120772, 120780), (123136, 123181), (123191, 123198),
123214, (123584, 123628), (124928, 125125), (125184, 125252), 125259,
(126464, 126468), (126469, 126496), (126497, 126499), 126500, 126503,
(126505, 126515), (126516, 126520), 126521, 126523, 126530, 126535,
126537, 126539, (126541, 126544), (126545, 126547), 126548, 126551,
126553, 126555, 126557, 126559, (126561, 126563), 126564,
(126567, 126571), (126572, 126579), (126580, 126584), (126585, 126589),
126590, (126592, 126602), (126603, 126620), (126625, 126628),
(126629, 126634), (126635, 126652), (131072, 173783), (173824, 177973),
(177984, 178206), (178208, 183970), (183984, 191457), (194560, 195102)],
'Ll': [(97, 123), 181, (223, 247), (248, 256), 257, 259, 261, 263, 265, 267,
269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295,
297, 299, 301, 303, 305, 307, 309, (311, 313), 314, 316, 318, 320, 322,
324, 326, (328, 330), 331, 333, 335, 337, 339, 341, 343, 345, 347, 349,
351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 378,
380, (382, 385), 387, 389, 392, (396, 398), 402, 405, (409, 412), 414,
417, 419, 421, 424, (426, 428), 429, 432, 436, 438, (441, 443),
(445, 448), 454, 457, 460, 462, 464, 466, 468, 470, 472, 474,
(476, 478), 479, 481, 483, 485, 487, 489, 491, 493, (495, 497), 499,
501, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529,
531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557,
559, 561, (563, 570), 572, (575, 577), 578, 583, 585, 587, 589,
(591, 660), (661, 688), 881, 883, 887, (891, 894), 912, (940, 975),
(976, 978), (981, 984), 985, 987, 989, 991, 993, 995, 997, 999, 1001,
1003, 1005, (1007, 1012), 1013, 1016, (1019, 1021), (1072, 1120), 1121,
1123, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145,
1147, 1149, 1151, 1153, 1163, 1165, 1167, 1169, 1171, 1173, 1175, 1177,
1179, 1181, 1183, 1185, 1187, 1189, 1191, 1193, 1195, 1197, 1199, 1201,
1203, 1205, 1207, 1209, 1211, 1213, 1215, 1218, 1220, 1222, 1224, 1226,
1228, (1230, 1232), 1233, 1235, 1237, 1239, 1241, 1243, 1245, 1247,
1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271,
1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295,
1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319,
1321, 1323, 1325, 1327, (1376, 1417), (4304, 4347), (4349, 4352),
(5112, 5118), (7296, 7305), (7424, 7468), (7531, 7544), (7545, 7579),
7681, 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7697, 7699, 7701, 7703,
7705, 7707, 7709, 7711, 7713, 7715, 7717, 7719, 7721, 7723, 7725, 7727,
7729, 7731, 7733, 7735, 7737, 7739, 7741, 7743, 7745, 7747, 7749, 7751,
7753, 7755, 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, 7773, 7775,
7777, 7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793, 7795, 7797, 7799,
7801, 7803, 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, 7821, 7823,
7825, 7827, (7829, 7838), 7839, 7841, 7843, 7845, 7847, 7849, 7851,
7853, 7855, 7857, 7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873, 7875,
7877, 7879, 7881, 7883, 7885, 7887, 7889, 7891, 7893, 7895, 7897, 7899,
7901, 7903, 7905, 7907, 7909, 7911, 7913, 7915, 7917, 7919, 7921, 7923,
7925, 7927, 7929, 7931, 7933, (7935, 7944), (7952, 7958), (7968, 7976),
(7984, 7992), (8000, 8006), (8016, 8024), (8032, 8040), (8048, 8062),
(8064, 8072), (8080, 8088), (8096, 8104), (8112, 8117), (8118, 8120),
8126, (8130, 8133), (8134, 8136), (8144, 8148), (8150, 8152),
(8160, 8168), (8178, 8181), (8182, 8184), 8458, (8462, 8464), 8467,
8495, 8500, 8505, (8508, 8510), (8518, 8522), 8526, 8580,
(11312, 11359), 11361, (11365, 11367), 11368, 11370, 11372, 11377,
(11379, 11381), (11382, 11388), 11393, 11395, 11397, 11399, 11401,
11403, 11405, 11407, 11409, 11411, 11413, 11415, 11417, 11419, 11421,
11423, 11425, 11427, 11429, 11431, 11433, 11435, 11437, 11439, 11441,
11443, 11445, 11447, 11449, 11451, 11453, 11455, 11457, 11459, 11461,
11463, 11465, 11467, 11469, 11471, 11473, 11475, 11477, 11479, 11481,
11483, 11485, 11487, 11489, (11491, 11493), 11500, 11502, 11507,
(11520, 11558), 11559, 11565, 42561, 42563, 42565, 42567, 42569, 42571,
42573, 42575, 42577, 42579, 42581, 42583, 42585, 42587, 42589, 42591,
42593, 42595, 42597, 42599, 42601, 42603, 42605, 42625, 42627, 42629,
42631, 42633, 42635, 42637, 42639, 42641, 42643, 42645, 42647, 42649,
42651, 42787, 42789, 42791, 42793, 42795, 42797, (42799, 42802), 42803,
42805, 42807, 42809, 42811, 42813, 42815, 42817, 42819, 42821, 42823,
42825, 42827, 42829, 42831, 42833, 42835, 42837, 42839, 42841, 42843,
42845, 42847, 42849, 42851, 42853, 42855, 42857, 42859, 42861, 42863,
(42865, 42873), 42874, 42876, 42879, 42881, 42883, 42885, 42887, 42892,
42894, 42897, (42899, 42902), 42903, 42905, 42907, 42909, 42911, 42913,
42915, 42917, 42919, 42921, 42927, 42933, 42935, 42937, 42939, 42941,
42943, 42947, 43002, (43824, 43867), (43872, 43880), (43888, 43968),
(64256, 64263), (64275, 64280), (65345, 65371), (66600, 66640),
(66776, 66812), (68800, 68851), (71872, 71904), (93792, 93824),
(119834, 119860), (119886, 119893), (119894, 119912), (119938, 119964),
(119990, 119994), 119995, (119997, 120004), (120005, 120016),
(120042, 120068), (120094, 120120), (120146, 120172), (120198, 120224),
(120250, 120276), (120302, 120328), (120354, 120380), (120406, 120432),
(120458, 120486), (120514, 120539), (120540, 120546), (120572, 120597),
(120598, 120604), (120630, 120655), (120656, 120662), (120688, 120713),
(120714, 120720), (120746, 120771), (120772, 120778), 120779,
(125218, 125252)],
'Lm': [(688, 706), (710, 722), (736, 741), 748, 750, 884, 890, 1369, 1600,
(1765, 1767), (2036, 2038), 2042, 2074, 2084, 2088, 2417, 3654, 3782,
4348, 6103, 6211, 6823, (7288, 7294), (7468, 7531), 7544, (7579, 7616),
8305, 8319, (8336, 8349), (11388, 11390), 11631, 11823, 12293,
(12337, 12342), 12347, (12445, 12447), (12540, 12543), 40981,
(42232, 42238), 42508, 42623, (42652, 42654), (42775, 42784), 42864,
42888, (43000, 43002), 43471, 43494, 43632, 43741, (43763, 43765),
(43868, 43872), 65392, (65438, 65440), (92992, 92996), (94099, 94112),
(94176, 94178), 94179, (123191, 123198), 125259],
'Lo': [170, 186, 443, (448, 452), 660, (1488, 1515), (1519, 1523),
(1568, 1600), (1601, 1611), (1646, 1648), (1649, 1748), 1749,
(1774, 1776), (1786, 1789), 1791, 1808, (1810, 1840), (1869, 1958),
1969, (1994, 2027), (2048, 2070), (2112, 2137), (2144, 2155),
(2208, 2229), (2230, 2238), (2308, 2362), 2365, 2384, (2392, 2402),
(2418, 2433), (2437, 2445), (2447, 2449), (2451, 2473), (2474, 2481),
2482, (2486, 2490), 2493, 2510, (2524, 2526), (2527, 2530),
(2544, 2546), 2556, (2565, 2571), (2575, 2577), (2579, 2601),
(2602, 2609), (2610, 2612), (2613, 2615), (2616, 2618), (2649, 2653),
2654, (2674, 2677), (2693, 2702), (2703, 2706), (2707, 2729),
(2730, 2737), (2738, 2740), (2741, 2746), 2749, 2768, (2784, 2786),
2809, (2821, 2829), (2831, 2833), (2835, 2857), (2858, 2865),
(2866, 2868), (2869, 2874), 2877, (2908, 2910), (2911, 2914), 2929,
2947, (2949, 2955), (2958, 2961), (2962, 2966), (2969, 2971), 2972,
(2974, 2976), (2979, 2981), (2984, 2987), (2990, 3002), 3024,
(3077, 3085), (3086, 3089), (3090, 3113), (3114, 3130), 3133,
(3160, 3163), (3168, 3170), 3200, (3205, 3213), (3214, 3217),
(3218, 3241), (3242, 3252), (3253, 3258), 3261, 3294, (3296, 3298),
(3313, 3315), (3333, 3341), (3342, 3345), (3346, 3387), 3389, 3406,
(3412, 3415), (3423, 3426), (3450, 3456), (3461, 3479), (3482, 3506),
(3507, 3516), 3517, (3520, 3527), (3585, 3633), (3634, 3636),
(3648, 3654), (3713, 3715), 3716, (3718, 3723), (3724, 3748), 3749,
(3751, 3761), (3762, 3764), 3773, (3776, 3781), (3804, 3808), 3840,
(3904, 3912), (3913, 3949), (3976, 3981), (4096, 4139), 4159,
(4176, 4182), (4186, 4190), 4193, (4197, 4199), (4206, 4209),
(4213, 4226), 4238, (4352, 4681), (4682, 4686), (4688, 4695), 4696,
(4698, 4702), (4704, 4745), (4746, 4750), (4752, 4785), (4786, 4790),
(4792, 4799), 4800, (4802, 4806), (4808, 4823), (4824, 4881),
(4882, 4886), (4888, 4955), (4992, 5008), (5121, 5741), (5743, 5760),
(5761, 5787), (5792, 5867), (5873, 5881), (5888, 5901), (5902, 5906),
(5920, 5938), (5952, 5970), (5984, 5997), (5998, 6001), (6016, 6068),
6108, (6176, 6211), (6212, 6265), (6272, 6277), (6279, 6313), 6314,
(6320, 6390), (6400, 6431), (6480, 6510), (6512, 6517), (6528, 6572),
(6576, 6602), (6656, 6679), (6688, 6741), (6917, 6964), (6981, 6988),
(7043, 7073), (7086, 7088), (7098, 7142), (7168, 7204), (7245, 7248),
(7258, 7288), (7401, 7405), (7406, 7412), (7413, 7415), 7418,
(8501, 8505), (11568, 11624), (11648, 11671), (11680, 11687),
(11688, 11695), (11696, 11703), (11704, 11711), (11712, 11719),
(11720, 11727), (11728, 11735), (11736, 11743), 12294, 12348,
(12353, 12439), 12447, (12449, 12539), 12543, (12549, 12592),
(12593, 12687), (12704, 12731), (12784, 12800), (13312, 19894),
(19968, 40944), (40960, 40981), (40982, 42125), (42192, 42232),
(42240, 42508), (42512, 42528), (42538, 42540), 42606, (42656, 42726),
42895, 42999, (43003, 43010), (43011, 43014), (43015, 43019),
(43020, 43043), (43072, 43124), (43138, 43188), (43250, 43256), 43259,
(43261, 43263), (43274, 43302), (43312, 43335), (43360, 43389),
(43396, 43443), (43488, 43493), (43495, 43504), (43514, 43519),
(43520, 43561), (43584, 43587), (43588, 43596), (43616, 43632),
(43633, 43639), 43642, (43646, 43696), 43697, (43701, 43703),
(43705, 43710), 43712, 43714, (43739, 43741), (43744, 43755), 43762,
(43777, 43783), (43785, 43791), (43793, 43799), (43808, 43815),
(43816, 43823), (43968, 44003), (44032, 55204), (55216, 55239),
(55243, 55292), (63744, 64110), (64112, 64218), 64285, (64287, 64297),
(64298, 64311), (64312, 64317), 64318, (64320, 64322), (64323, 64325),
(64326, 64434), (64467, 64830), (64848, 64912), (64914, 64968),
(65008, 65020), (65136, 65141), (65142, 65277), (65382, 65392),
(65393, 65438), (65440, 65471), (65474, 65480), (65482, 65488),
(65490, 65496), (65498, 65501), (65536, 65548), (65549, 65575),
(65576, 65595), (65596, 65598), (65599, 65614), (65616, 65630),
(65664, 65787), (66176, 66205), (66208, 66257), (66304, 66336),
(66349, 66369), (66370, 66378), (66384, 66422), (66432, 66462),
(66464, 66500), (66504, 66512), (66640, 66718), (66816, 66856),
(66864, 66916), (67072, 67383), (67392, 67414), (67424, 67432),
(67584, 67590), 67592, (67594, 67638), (67639, 67641), 67644,
(67647, 67670), (67680, 67703), (67712, 67743), (67808, 67827),
(67828, 67830), (67840, 67862), (67872, 67898), (67968, 68024),
(68030, 68032), 68096, (68112, 68116), (68117, 68120), (68121, 68150),
(68192, 68221), (68224, 68253), (68288, 68296), (68297, 68325),
(68352, 68406), (68416, 68438), (68448, 68467), (68480, 68498),
(68608, 68681), (68864, 68900), (69376, 69405), 69415, (69424, 69446),
(69600, 69623), (69635, 69688), (69763, 69808), (69840, 69865),
(69891, 69927), 69956, (69968, 70003), 70006, (70019, 70067),
(70081, 70085), 70106, 70108, (70144, 70162), (70163, 70188),
(70272, 70279), 70280, (70282, 70286), (70287, 70302), (70303, 70313),
(70320, 70367), (70405, 70413), (70415, 70417), (70419, 70441),
(70442, 70449), (70450, 70452), (70453, 70458), 70461, 70480,
(70493, 70498), (70656, 70709), (70727, 70731), 70751, (70784, 70832),
(70852, 70854), 70855, (71040, 71087), (71128, 71132), (71168, 71216),
71236, (71296, 71339), 71352, (71424, 71451), (71680, 71724), 71935,
(72096, 72104), (72106, 72145), 72161, 72163, 72192, (72203, 72243),
72250, 72272, (72284, 72330), 72349, (72384, 72441), (72704, 72713),
(72714, 72751), 72768, (72818, 72848), (72960, 72967), (72968, 72970),
(72971, 73009), 73030, (73056, 73062), (73063, 73065), (73066, 73098),
73112, (73440, 73459), (73728, 74650), (74880, 75076), (77824, 78895),
(82944, 83527), (92160, 92729), (92736, 92767), (92880, 92910),
(92928, 92976), (93027, 93048), (93053, 93072), (93952, 94027), 94032,
(94208, 100344), (100352, 101107), (110592, 110879), (110928, 110931),
(110948, 110952), (110960, 111356), (113664, 113771), (113776, 113789),
(113792, 113801), (113808, 113818), (123136, 123181), 123214,
(123584, 123628), (124928, 125125), (126464, 126468), (126469, 126496),
(126497, 126499), 126500, 126503, (126505, 126515), (126516, 126520),
126521, 126523, 126530, 126535, 126537, 126539, (126541, 126544),
(126545, 126547), 126548, 126551, 126553, 126555, 126557, 126559,
(126561, 126563), 126564, (126567, 126571), (126572, 126579),
(126580, 126584), (126585, 126589), 126590, (126592, 126602),
(126603, 126620), (126625, 126628), (126629, 126634), (126635, 126652),
(131072, 173783), (173824, 177973), (177984, 178206), (178208, 183970),
(183984, 191457), (194560, 195102)],
'Lt': [453, 456, 459, 498, (8072, 8080), (8088, 8096), (8104, 8112), 8124,
8140, 8188],
'Lu': [(65, 91), (192, 215), (216, 223), 256, 258, 260, 262, 264, 266, 268,
270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296,
298, 300, 302, 304, 306, 308, 310, 313, 315, 317, 319, 321, 323, 325,
327, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354,
356, 358, 360, 362, 364, 366, 368, 370, 372, 374, (376, 378), 379, 381,
(385, 387), 388, (390, 392), (393, 396), (398, 402), (403, 405),
(406, 409), (412, 414), (415, 417), 418, 420, (422, 424), 425, 428,
(430, 432), (433, 436), 437, (439, 441), 444, 452, 455, 458, 461, 463,
465, 467, 469, 471, 473, 475, 478, 480, 482, 484, 486, 488, 490, 492,
494, 497, 500, (502, 505), 506, 508, 510, 512, 514, 516, 518, 520, 522,
524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550,
552, 554, 556, 558, 560, 562, (570, 572), (573, 575), 577, (579, 583),
584, 586, 588, 590, 880, 882, 886, 895, 902, (904, 907), 908,
(910, 912), (913, 930), (931, 940), 975, (978, 981), 984, 986, 988, 990,
992, 994, 996, 998, 1000, 1002, 1004, 1006, 1012, 1015, (1017, 1019),
(1021, 1072), 1120, 1122, 1124, 1126, 1128, 1130, 1132, 1134, 1136,
1138, 1140, 1142, 1144, 1146, 1148, 1150, 1152, 1162, 1164, 1166, 1168,
1170, 1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186, 1188, 1190, 1192,
1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1210, 1212, 1214,
(1216, 1218), 1219, 1221, 1223, 1225, 1227, 1229, 1232, 1234, 1236,
1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1256, 1258, 1260,
1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284,
1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 1308,
1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, (1329, 1367),
(4256, 4294), 4295, 4301, (5024, 5110), (7312, 7355), (7357, 7360),
7680, 7682, 7684, 7686, 7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702,
7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718, 7720, 7722, 7724, 7726,
7728, 7730, 7732, 7734, 7736, 7738, 7740, 7742, 7744, 7746, 7748, 7750,
7752, 7754, 7756, 7758, 7760, 7762, 7764, 7766, 7768, 7770, 7772, 7774,
7776, 7778, 7780, 7782, 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798,
7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, 7816, 7818, 7820, 7822,
7824, 7826, 7828, 7838, 7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854,
7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872, 7874, 7876, 7878,
7880, 7882, 7884, 7886, 7888, 7890, 7892, 7894, 7896, 7898, 7900, 7902,
7904, 7906, 7908, 7910, 7912, 7914, 7916, 7918, 7920, 7922, 7924, 7926,
7928, 7930, 7932, 7934, (7944, 7952), (7960, 7966), (7976, 7984),
(7992, 8000), (8008, 8014), 8025, 8027, 8029, 8031, (8040, 8048),
(8120, 8124), (8136, 8140), (8152, 8156), (8168, 8173), (8184, 8188),
8450, 8455, (8459, 8462), (8464, 8467), 8469, (8473, 8478), 8484, 8486,
8488, (8490, 8494), (8496, 8500), (8510, 8512), 8517, 8579,
(11264, 11311), 11360, (11362, 11365), 11367, 11369, 11371,
(11373, 11377), 11378, 11381, (11390, 11393), 11394, 11396, 11398,
11400, 11402, 11404, 11406, 11408, 11410, 11412, 11414, 11416, 11418,
11420, 11422, 11424, 11426, 11428, 11430, 11432, 11434, 11436, 11438,
11440, 11442, 11444, 11446, 11448, 11450, 11452, 11454, 11456, 11458,
11460, 11462, 11464, 11466, 11468, 11470, 11472, 11474, 11476, 11478,
11480, 11482, 11484, 11486, 11488, 11490, 11499, 11501, 11506, 42560,
42562, 42564, 42566, 42568, 42570, 42572, 42574, 42576, 42578, 42580,
42582, 42584, 42586, 42588, 42590, 42592, 42594, 42596, 42598, 42600,
42602, 42604, 42624, 42626, 42628, 42630, 42632, 42634, 42636, 42638,
42640, 42642, 42644, 42646, 42648, 42650, 42786, 42788, 42790, 42792,
42794, 42796, 42798, 42802, 42804, 42806, 42808, 42810, 42812, 42814,
42816, 42818, 42820, 42822, 42824, 42826, 42828, 42830, 42832, 42834,
42836, 42838, 42840, 42842, 42844, 42846, 42848, 42850, 42852, 42854,
42856, 42858, 42860, 42862, 42873, 42875, (42877, 42879), 42880, 42882,
42884, 42886, 42891, 42893, 42896, 42898, 42902, 42904, 42906, 42908,
42910, 42912, 42914, 42916, 42918, 42920, (42922, 42927),
(42928, 42933), 42934, 42936, 42938, 42940, 42942, 42946,
(42948, 42951), (65313, 65339), (66560, 66600), (66736, 66772),
(68736, 68787), (71840, 71872), (93760, 93792), (119808, 119834),
(119860, 119886), (119912, 119938), 119964, (119966, 119968), 119970,
(119973, 119975), (119977, 119981), (119982, 119990), (120016, 120042),
(120068, 120070), (120071, 120075), (120077, 120085), (120086, 120093),
(120120, 120122), (120123, 120127), (120128, 120133), 120134,
(120138, 120145), (120172, 120198), (120224, 120250), (120276, 120302),
(120328, 120354), (120380, 120406), (120432, 120458), (120488, 120513),
(120546, 120571), (120604, 120629), (120662, 120687), (120720, 120745),
120778, (125184, 125218)],
'M': [(768, 880), (1155, 1162), (1425, 1470), 1471, (1473, 1475), (1476, 1478),
1479, (1552, 1563), (1611, 1632), 1648, (1750, 1757), (1759, 1765),
(1767, 1769), (1770, 1774), 1809, (1840, 1867), (1958, 1969),
(2027, 2036), 2045, (2070, 2074), (2075, 2084), (2085, 2088),
(2089, 2094), (2137, 2140), (2259, 2274), (2275, 2308), (2362, 2365),
(2366, 2384), (2385, 2392), (2402, 2404), (2433, 2436), 2492,
(2494, 2501), (2503, 2505), (2507, 2510), 2519, (2530, 2532), 2558,
(2561, 2564), 2620, (2622, 2627), (2631, 2633), (2635, 2638), 2641,
(2672, 2674), 2677, (2689, 2692), 2748, (2750, 2758), (2759, 2762),
(2763, 2766), (2786, 2788), (2810, 2816), (2817, 2820), 2876,
(2878, 2885), (2887, 2889), (2891, 2894), (2902, 2904), (2914, 2916),
2946, (3006, 3011), (3014, 3017), (3018, 3022), 3031, (3072, 3077),
(3134, 3141), (3142, 3145), (3146, 3150), (3157, 3159), (3170, 3172),
(3201, 3204), 3260, (3262, 3269), (3270, 3273), (3274, 3278),
(3285, 3287), (3298, 3300), (3328, 3332), (3387, 3389), (3390, 3397),
(3398, 3401), (3402, 3406), 3415, (3426, 3428), (3458, 3460), 3530,
(3535, 3541), 3542, (3544, 3552), (3570, 3572), 3633, (3636, 3643),
(3655, 3663), 3761, (3764, 3773), (3784, 3790), (3864, 3866), 3893, 3895,
3897, (3902, 3904), (3953, 3973), (3974, 3976), (3981, 3992),
(3993, 4029), 4038, (4139, 4159), (4182, 4186), (4190, 4193),
(4194, 4197), (4199, 4206), (4209, 4213), (4226, 4238), 4239,
(4250, 4254), (4957, 4960), (5906, 5909), (5938, 5941), (5970, 5972),
(6002, 6004), (6068, 6100), 6109, (6155, 6158), (6277, 6279), 6313,
(6432, 6444), (6448, 6460), (6679, 6684), (6741, 6751), (6752, 6781),
6783, (6832, 6847), (6912, 6917), (6964, 6981), (7019, 7028),
(7040, 7043), (7073, 7086), (7142, 7156), (7204, 7224), (7376, 7379),
(7380, 7401), 7405, 7412, (7415, 7418), (7616, 7674), (7675, 7680),
(8400, 8433), (11503, 11506), 11647, (11744, 11776), (12330, 12336),
(12441, 12443), (42607, 42611), (42612, 42622), (42654, 42656),
(42736, 42738), 43010, 43014, 43019, (43043, 43048), (43136, 43138),
(43188, 43206), (43232, 43250), 43263, (43302, 43310), (43335, 43348),
(43392, 43396), (43443, 43457), 43493, (43561, 43575), 43587,
(43596, 43598), (43643, 43646), 43696, (43698, 43701), (43703, 43705),
(43710, 43712), 43713, (43755, 43760), (43765, 43767), (44003, 44011),
(44012, 44014), 64286, (65024, 65040), (65056, 65072), 66045, 66272,
(66422, 66427), (68097, 68100), (68101, 68103), (68108, 68112),
(68152, 68155), 68159, (68325, 68327), (68900, 68904), (69446, 69457),
(69632, 69635), (69688, 69703), (69759, 69763), (69808, 69819),
(69888, 69891), (69927, 69941), (69957, 69959), 70003, (70016, 70019),
(70067, 70081), (70089, 70093), (70188, 70200), 70206, (70367, 70379),
(70400, 70404), (70459, 70461), (70462, 70469), (70471, 70473),
(70475, 70478), 70487, (70498, 70500), (70502, 70509), (70512, 70517),
(70709, 70727), 70750, (70832, 70852), (71087, 71094), (71096, 71105),
(71132, 71134), (71216, 71233), (71339, 71352), (71453, 71468),
(71724, 71739), (72145, 72152), (72154, 72161), 72164, (72193, 72203),
(72243, 72250), (72251, 72255), 72263, (72273, 72284), (72330, 72346),
(72751, 72759), (72760, 72768), (72850, 72872), (72873, 72887),
(73009, 73015), 73018, (73020, 73022), (73023, 73030), 73031,
(73098, 73103), (73104, 73106), (73107, 73112), (73459, 73463),
(92912, 92917), (92976, 92983), 94031, (94033, 94088), (94095, 94099),
(113821, 113823), (119141, 119146), (119149, 119155), (119163, 119171),
(119173, 119180), (119210, 119214), (119362, 119365), (121344, 121399),
(121403, 121453), 121461, 121476, (121499, 121504), (121505, 121520),
(122880, 122887), (122888, 122905), (122907, 122914), (122915, 122917),
(122918, 122923), (123184, 123191), (123628, 123632), (125136, 125143),
(125252, 125259), (917760, 918000)],
'Mc': [2307, 2363, (2366, 2369), (2377, 2381), (2382, 2384), (2434, 2436),
(2494, 2497), (2503, 2505), (2507, 2509), 2519, 2563, (2622, 2625),
2691, (2750, 2753), 2761, (2763, 2765), (2818, 2820), 2878, 2880,
(2887, 2889), (2891, 2893), 2903, (3006, 3008), (3009, 3011),
(3014, 3017), (3018, 3021), 3031, (3073, 3076), (3137, 3141),
(3202, 3204), 3262, (3264, 3269), (3271, 3273), (3274, 3276),
(3285, 3287), (3330, 3332), (3390, 3393), (3398, 3401), (3402, 3405),
3415, (3458, 3460), (3535, 3538), (3544, 3552), (3570, 3572),
(3902, 3904), 3967, (4139, 4141), 4145, 4152, (4155, 4157),
(4182, 4184), (4194, 4197), (4199, 4206), (4227, 4229), (4231, 4237),
4239, (4250, 4253), 6070, (6078, 6086), (6087, 6089), (6435, 6439),
(6441, 6444), (6448, 6450), (6451, 6457), (6681, 6683), 6741, 6743,
6753, (6755, 6757), (6765, 6771), 6916, 6965, 6971, (6973, 6978),
(6979, 6981), 7042, 7073, (7078, 7080), 7082, 7143, (7146, 7149), 7150,
(7154, 7156), (7204, 7212), (7220, 7222), 7393, 7415, (12334, 12336),
(43043, 43045), 43047, (43136, 43138), (43188, 43204), (43346, 43348),
43395, (43444, 43446), (43450, 43452), (43454, 43457), (43567, 43569),
(43571, 43573), 43597, 43643, 43645, 43755, (43758, 43760), 43765,
(44003, 44005), (44006, 44008), (44009, 44011), 44012, 69632, 69634,
69762, (69808, 69811), (69815, 69817), 69932, (69957, 69959), 70018,
(70067, 70070), (70079, 70081), (70188, 70191), (70194, 70196), 70197,
(70368, 70371), (70402, 70404), (70462, 70464), (70465, 70469),
(70471, 70473), (70475, 70478), 70487, (70498, 70500), (70709, 70712),
(70720, 70722), 70725, (70832, 70835), 70841, (70843, 70847), 70849,
(71087, 71090), (71096, 71100), 71102, (71216, 71219), (71227, 71229),
71230, 71340, (71342, 71344), 71350, (71456, 71458), 71462,
(71724, 71727), 71736, (72145, 72148), (72156, 72160), 72164, 72249,
(72279, 72281), 72343, 72751, 72766, 72873, 72881, 72884,
(73098, 73103), (73107, 73109), 73110, (73461, 73463), (94033, 94088),
(119141, 119143), (119149, 119155)],
'Me': [(1160, 1162), 6846, (8413, 8417), (8418, 8421), (42608, 42611)],
'Mn': [(768, 880), (1155, 1160), (1425, 1470), 1471, (1473, 1475),
(1476, 1478), 1479, (1552, 1563), (1611, 1632), 1648, (1750, 1757),
(1759, 1765), (1767, 1769), (1770, 1774), 1809, (1840, 1867),
(1958, 1969), (2027, 2036), 2045, (2070, 2074), (2075, 2084),
(2085, 2088), (2089, 2094), (2137, 2140), (2259, 2274), (2275, 2307),
2362, 2364, (2369, 2377), 2381, (2385, 2392), (2402, 2404), 2433, 2492,
(2497, 2501), 2509, (2530, 2532), 2558, (2561, 2563), 2620,
(2625, 2627), (2631, 2633), (2635, 2638), 2641, (2672, 2674), 2677,
(2689, 2691), 2748, (2753, 2758), (2759, 2761), 2765, (2786, 2788),
(2810, 2816), 2817, 2876, 2879, (2881, 2885), 2893, 2902, (2914, 2916),
2946, 3008, 3021, 3072, 3076, (3134, 3137), (3142, 3145), (3146, 3150),
(3157, 3159), (3170, 3172), 3201, 3260, 3263, 3270, (3276, 3278),
(3298, 3300), (3328, 3330), (3387, 3389), (3393, 3397), 3405,
(3426, 3428), 3530, (3538, 3541), 3542, 3633, (3636, 3643),
(3655, 3663), 3761, (3764, 3773), (3784, 3790), (3864, 3866), 3893,
3895, 3897, (3953, 3967), (3968, 3973), (3974, 3976), (3981, 3992),
(3993, 4029), 4038, (4141, 4145), (4146, 4152), (4153, 4155),
(4157, 4159), (4184, 4186), (4190, 4193), (4209, 4213), 4226,
(4229, 4231), 4237, 4253, (4957, 4960), (5906, 5909), (5938, 5941),
(5970, 5972), (6002, 6004), (6068, 6070), (6071, 6078), 6086,
(6089, 6100), 6109, (6155, 6158), (6277, 6279), 6313, (6432, 6435),
(6439, 6441), 6450, (6457, 6460), (6679, 6681), 6683, 6742,
(6744, 6751), 6752, 6754, (6757, 6765), (6771, 6781), 6783,
(6832, 6846), (6912, 6916), 6964, (6966, 6971), 6972, 6978,
(7019, 7028), (7040, 7042), (7074, 7078), (7080, 7082), (7083, 7086),
7142, (7144, 7146), 7149, (7151, 7154), (7212, 7220), (7222, 7224),
(7376, 7379), (7380, 7393), (7394, 7401), 7405, 7412, (7416, 7418),
(7616, 7674), (7675, 7680), (8400, 8413), 8417, (8421, 8433),
(11503, 11506), 11647, (11744, 11776), (12330, 12334), (12441, 12443),
42607, (42612, 42622), (42654, 42656), (42736, 42738), 43010, 43014,
43019, (43045, 43047), (43204, 43206), (43232, 43250), 43263,
(43302, 43310), (43335, 43346), (43392, 43395), 43443, (43446, 43450),
(43452, 43454), 43493, (43561, 43567), (43569, 43571), (43573, 43575),
43587, 43596, 43644, 43696, (43698, 43701), (43703, 43705),
(43710, 43712), 43713, (43756, 43758), 43766, 44005, 44008, 44013,
64286, (65024, 65040), (65056, 65072), 66045, 66272, (66422, 66427),
(68097, 68100), (68101, 68103), (68108, 68112), (68152, 68155), 68159,
(68325, 68327), (68900, 68904), (69446, 69457), 69633, (69688, 69703),
(69759, 69762), (69811, 69815), (69817, 69819), (69888, 69891),
(69927, 69932), (69933, 69941), 70003, (70016, 70018), (70070, 70079),
(70089, 70093), (70191, 70194), 70196, (70198, 70200), 70206, 70367,
(70371, 70379), (70400, 70402), (70459, 70461), 70464, (70502, 70509),
(70512, 70517), (70712, 70720), (70722, 70725), 70726, 70750,
(70835, 70841), 70842, (70847, 70849), (70850, 70852), (71090, 71094),
(71100, 71102), (71103, 71105), (71132, 71134), (71219, 71227), 71229,
(71231, 71233), 71339, 71341, (71344, 71350), 71351, (71453, 71456),
(71458, 71462), (71463, 71468), (71727, 71736), (71737, 71739),
(72148, 72152), (72154, 72156), 72160, (72193, 72203), (72243, 72249),
(72251, 72255), 72263, (72273, 72279), (72281, 72284), (72330, 72343),
(72344, 72346), (72752, 72759), (72760, 72766), 72767, (72850, 72872),
(72874, 72881), (72882, 72884), (72885, 72887), (73009, 73015), 73018,
(73020, 73022), (73023, 73030), 73031, (73104, 73106), 73109, 73111,
(73459, 73461), (92912, 92917), (92976, 92983), 94031, (94095, 94099),
(113821, 113823), (119143, 119146), (119163, 119171), (119173, 119180),
(119210, 119214), (119362, 119365), (121344, 121399), (121403, 121453),
121461, 121476, (121499, 121504), (121505, 121520), (122880, 122887),
(122888, 122905), (122907, 122914), (122915, 122917), (122918, 122923),
(123184, 123191), (123628, 123632), (125136, 125143), (125252, 125259),
(917760, 918000)],
'N': [(48, 58), (178, 180), 185, (188, 191), (1632, 1642), (1776, 1786),
(1984, 1994), (2406, 2416), (2534, 2544), (2548, 2554), (2662, 2672),
(2790, 2800), (2918, 2928), (2930, 2936), (3046, 3059), (3174, 3184),
(3192, 3199), (3302, 3312), (3416, 3423), (3430, 3449), (3558, 3568),
(3664, 3674), (3792, 3802), (3872, 3892), (4160, 4170), (4240, 4250),
(4969, 4989), (5870, 5873), (6112, 6122), (6128, 6138), (6160, 6170),
(6470, 6480), (6608, 6619), (6784, 6794), (6800, 6810), (6992, 7002),
(7088, 7098), (7232, 7242), (7248, 7258), 8304, (8308, 8314),
(8320, 8330), (8528, 8579), (8581, 8586), (9312, 9372), (9450, 9472),
(10102, 10132), 11517, 12295, (12321, 12330), (12344, 12347),
(12690, 12694), (12832, 12842), (12872, 12880), (12881, 12896),
(12928, 12938), (12977, 12992), (42528, 42538), (42726, 42736),
(43056, 43062), (43216, 43226), (43264, 43274), (43472, 43482),
(43504, 43514), (43600, 43610), (44016, 44026), (65296, 65306),
(65799, 65844), (65856, 65913), (65930, 65932), (66273, 66300),
(66336, 66340), 66369, 66378, (66513, 66518), (66720, 66730),
(67672, 67680), (67705, 67712), (67751, 67760), (67835, 67840),
(67862, 67868), (68028, 68030), (68032, 68048), (68050, 68096),
(68160, 68169), (68221, 68223), (68253, 68256), (68331, 68336),
(68440, 68448), (68472, 68480), (68521, 68528), (68858, 68864),
(68912, 68922), (69216, 69247), (69405, 69415), (69457, 69461),
(69714, 69744), (69872, 69882), (69942, 69952), (70096, 70106),
(70113, 70133), (70384, 70394), (70736, 70746), (70864, 70874),
(71248, 71258), (71360, 71370), (71472, 71484), (71904, 71923),
(72784, 72813), (73040, 73050), (73120, 73130), (73664, 73685),
(74752, 74863), (92768, 92778), (93008, 93018), (93019, 93026),
(93824, 93847), (119520, 119540), (119648, 119673), (120782, 120832),
(123200, 123210), (123632, 123642), (125127, 125136), (125264, 125274),
(126065, 126124), (126125, 126128), (126129, 126133), (126209, 126254),
(126255, 126270), (127232, 127245)],
'Nd': [(48, 58), (1632, 1642), (1776, 1786), (1984, 1994), (2406, 2416),
(2534, 2544), (2662, 2672), (2790, 2800), (2918, 2928), (3046, 3056),
(3174, 3184), (3302, 3312), (3430, 3440), (3558, 3568), (3664, 3674),
(3792, 3802), (3872, 3882), (4160, 4170), (4240, 4250), (6112, 6122),
(6160, 6170), (6470, 6480), (6608, 6618), (6784, 6794), (6800, 6810),
(6992, 7002), (7088, 7098), (7232, 7242), (7248, 7258), (42528, 42538),
(43216, 43226), (43264, 43274), (43472, 43482), (43504, 43514),
(43600, 43610), (44016, 44026), (65296, 65306), (66720, 66730),
(68912, 68922), (69734, 69744), (69872, 69882), (69942, 69952),
(70096, 70106), (70384, 70394), (70736, 70746), (70864, 70874),
(71248, 71258), (71360, 71370), (71472, 71482), (71904, 71914),
(72784, 72794), (73040, 73050), (73120, 73130), (92768, 92778),
(93008, 93018), (120782, 120832), (123200, 123210), (123632, 123642),
(125264, 125274)],
'Nl': [(5870, 5873), (8544, 8579), (8581, 8585), 12295, (12321, 12330),
(12344, 12347), (42726, 42736), (65856, 65909), 66369, 66378,
(66513, 66518), (74752, 74863)],
'No': [(178, 180), 185, (188, 191), (2548, 2554), (2930, 2936), (3056, 3059),
(3192, 3199), (3416, 3423), (3440, 3449), (3882, 3892), (4969, 4989),
(6128, 6138), 6618, 8304, (8308, 8314), (8320, 8330), (8528, 8544),
8585, (9312, 9372), (9450, 9472), (10102, 10132), 11517, (12690, 12694),
(12832, 12842), (12872, 12880), (12881, 12896), (12928, 12938),
(12977, 12992), (43056, 43062), (65799, 65844), (65909, 65913),
(65930, 65932), (66273, 66300), (66336, 66340), (67672, 67680),
(67705, 67712), (67751, 67760), (67835, 67840), (67862, 67868),
(68028, 68030), (68032, 68048), (68050, 68096), (68160, 68169),
(68221, 68223), (68253, 68256), (68331, 68336), (68440, 68448),
(68472, 68480), (68521, 68528), (68858, 68864), (69216, 69247),
(69405, 69415), (69457, 69461), (69714, 69734), (70113, 70133),
(71482, 71484), (71914, 71923), (72794, 72813), (73664, 73685),
(93019, 93026), (93824, 93847), (119520, 119540), (119648, 119673),
(125127, 125136), (126065, 126124), (126125, 126128), (126129, 126133),
(126209, 126254), (126255, 126270), (127232, 127245)],
'P': [(33, 36), (37, 43), (44, 48), (58, 60), (63, 65), (91, 94), 95, 123, 125,
161, 167, 171, (182, 184), 187, 191, 894, 903, (1370, 1376),
(1417, 1419), 1470, 1472, 1475, 1478, (1523, 1525), (1545, 1547),
(1548, 1550), 1563, (1566, 1568), (1642, 1646), 1748, (1792, 1806),
(2039, 2042), (2096, 2111), 2142, (2404, 2406), 2416, 2557, 2678, 2800,
3191, 3204, 3572, 3663, (3674, 3676), (3844, 3859), 3860, (3898, 3902),
3973, (4048, 4053), (4057, 4059), (4170, 4176), 4347, (4960, 4969), 5120,
5742, (5787, 5789), (5867, 5870), (5941, 5943), (6100, 6103),
(6104, 6107), (6144, 6155), (6468, 6470), (6686, 6688), (6816, 6823),
(6824, 6830), (7002, 7009), (7164, 7168), (7227, 7232), (7294, 7296),
(7360, 7368), 7379, (8208, 8232), (8240, 8260), (8261, 8274),
(8275, 8287), (8317, 8319), (8333, 8335), (8968, 8972), (9001, 9003),
(10088, 10102), (10181, 10183), (10214, 10224), (10627, 10649),
(10712, 10716), (10748, 10750), (11513, 11517), (11518, 11520), 11632,
(11776, 11823), (11824, 11856), (12289, 12292), (12296, 12306),
(12308, 12320), 12336, 12349, 12448, 12539, (42238, 42240),
(42509, 42512), 42611, 42622, (42738, 42744), (43124, 43128),
(43214, 43216), (43256, 43259), 43260, (43310, 43312), 43359,
(43457, 43470), (43486, 43488), (43612, 43616), (43742, 43744),
(43760, 43762), 44011, (64830, 64832), (65040, 65050), (65072, 65107),
(65108, 65122), 65123, 65128, (65130, 65132), (65281, 65284),
(65285, 65291), (65292, 65296), (65306, 65308), (65311, 65313),
(65339, 65342), 65343, 65371, 65373, (65375, 65382), (65792, 65795),
66463, 66512, 66927, 67671, 67871, 67903, (68176, 68185), 68223,
(68336, 68343), (68409, 68416), (68505, 68509), (69461, 69466),
(69703, 69710), (69819, 69821), (69822, 69826), (69952, 69956),
(70004, 70006), (70085, 70089), 70093, 70107, (70109, 70112),
(70200, 70206), 70313, (70731, 70736), 70747, 70749, 70854,
(71105, 71128), (71233, 71236), (71264, 71277), (71484, 71487), 71739,
72162, (72255, 72263), (72346, 72349), (72350, 72355), (72769, 72774),
(72816, 72818), (73463, 73465), 73727, (74864, 74869), (92782, 92784),
92917, (92983, 92988), 92996, (93847, 93851), 94178, 113823,
(121479, 121484), (125278, 125280)],
'Pc': [95, (8255, 8257), 8276, (65075, 65077), (65101, 65104), 65343],
'Pd': [45, 1418, 1470, 5120, 6150, (8208, 8214), 11799, 11802, (11834, 11836),
11840, 12316, 12336, 12448, (65073, 65075), 65112, 65123, 65293],
'Pe': [41, 93, 125, 3899, 3901, 5788, 8262, 8318, 8334, 8969, 8971, 9002,
10089, 10091, 10093, 10095, 10097, 10099, 10101, 10182, 10215, 10217,
10219, 10221, 10223, 10628, 10630, 10632, 10634, 10636, 10638, 10640,
10642, 10644, 10646, 10648, 10713, 10715, 10749, 11811, 11813, 11815,
11817, 12297, 12299, 12301, 12303, 12305, 12309, 12311, 12313, 12315,
(12318, 12320), 64830, 65048, 65078, 65080, 65082, 65084, 65086, 65088,
65090, 65092, 65096, 65114, 65116, 65118, 65289, 65341, 65373, 65376,
65379],
'Pf': [187, 8217, 8221, 8250, 11779, 11781, 11786, 11789, 11805, 11809],
'Pi': [171, 8216, (8219, 8221), 8223, 8249, 11778, 11780, 11785, 11788, 11804,
11808],
'Po': [(33, 36), (37, 40), 42, 44, (46, 48), (58, 60), (63, 65), 92, 161, 167,
(182, 184), 191, 894, 903, (1370, 1376), 1417, 1472, 1475, 1478,
(1523, 1525), (1545, 1547), (1548, 1550), 1563, (1566, 1568),
(1642, 1646), 1748, (1792, 1806), (2039, 2042), (2096, 2111), 2142,
(2404, 2406), 2416, 2557, 2678, 2800, 3191, 3204, 3572, 3663,
(3674, 3676), (3844, 3859), 3860, 3973, (4048, 4053), (4057, 4059),
(4170, 4176), 4347, (4960, 4969), 5742, (5867, 5870), (5941, 5943),
(6100, 6103), (6104, 6107), (6144, 6150), (6151, 6155), (6468, 6470),
(6686, 6688), (6816, 6823), (6824, 6830), (7002, 7009), (7164, 7168),
(7227, 7232), (7294, 7296), (7360, 7368), 7379, (8214, 8216),
(8224, 8232), (8240, 8249), (8251, 8255), (8257, 8260), (8263, 8274),
8275, (8277, 8287), (11513, 11517), (11518, 11520), 11632,
(11776, 11778), (11782, 11785), 11787, (11790, 11799), (11800, 11802),
11803, (11806, 11808), (11818, 11823), (11824, 11834), (11836, 11840),
11841, (11843, 11856), (12289, 12292), 12349, 12539, (42238, 42240),
(42509, 42512), 42611, 42622, (42738, 42744), (43124, 43128),
(43214, 43216), (43256, 43259), 43260, (43310, 43312), 43359,
(43457, 43470), (43486, 43488), (43612, 43616), (43742, 43744),
(43760, 43762), 44011, (65040, 65047), 65049, 65072, (65093, 65095),
(65097, 65101), (65104, 65107), (65108, 65112), (65119, 65122), 65128,
(65130, 65132), (65281, 65284), (65285, 65288), 65290, 65292,
(65294, 65296), (65306, 65308), (65311, 65313), 65340, 65377,
(65380, 65382), (65792, 65795), 66463, 66512, 66927, 67671, 67871,
67903, (68176, 68185), 68223, (68336, 68343), (68409, 68416),
(68505, 68509), (69461, 69466), (69703, 69710), (69819, 69821),
(69822, 69826), (69952, 69956), (70004, 70006), (70085, 70089), 70093,
70107, (70109, 70112), (70200, 70206), 70313, (70731, 70736), 70747,
70749, 70854, (71105, 71128), (71233, 71236), (71264, 71277),
(71484, 71487), 71739, 72162, (72255, 72263), (72346, 72349),
(72350, 72355), (72769, 72774), (72816, 72818), (73463, 73465), 73727,
(74864, 74869), (92782, 92784), 92917, (92983, 92988), 92996,
(93847, 93851), 94178, 113823, (121479, 121484), (125278, 125280)],
'Ps': [40, 91, 123, 3898, 3900, 5787, 8218, 8222, 8261, 8317, 8333, 8968, 8970,
9001, 10088, 10090, 10092, 10094, 10096, 10098, 10100, 10181, 10214,
10216, 10218, 10220, 10222, 10627, 10629, 10631, 10633, 10635, 10637,
10639, 10641, 10643, 10645, 10647, 10712, 10714, 10748, 11810, 11812,
11814, 11816, 11842, 12296, 12298, 12300, 12302, 12304, 12308, 12310,
12312, 12314, 12317, 64831, 65047, 65077, 65079, 65081, 65083, 65085,
65087, 65089, 65091, 65095, 65113, 65115, 65117, 65288, 65339, 65371,
65375, 65378],
'S': [36, 43, (60, 63), 94, 96, 124, 126, (162, 167), (168, 170), 172,
(174, 178), 180, 184, 215, 247, (706, 710), (722, 736), (741, 748), 749,
(751, 768), 885, (900, 902), 1014, 1154, (1421, 1424), (1542, 1545),
1547, (1550, 1552), 1758, 1769, (1789, 1791), 2038, (2046, 2048),
(2546, 2548), (2554, 2556), 2801, 2928, (3059, 3067), 3199, 3407, 3449,
3647, (3841, 3844), 3859, (3861, 3864), (3866, 3872), 3892, 3894, 3896,
(4030, 4038), (4039, 4045), (4046, 4048), (4053, 4057), (4254, 4256),
(5008, 5018), 5741, 6107, 6464, (6622, 6656), (7009, 7019), (7028, 7037),
8125, (8127, 8130), (8141, 8144), (8157, 8160), (8173, 8176),
(8189, 8191), 8260, 8274, (8314, 8317), (8330, 8333), (8352, 8384),
(8448, 8450), (8451, 8455), (8456, 8458), 8468, (8470, 8473),
(8478, 8484), 8485, 8487, 8489, 8494, (8506, 8508), (8512, 8517),
(8522, 8526), 8527, (8586, 8588), (8592, 8968), (8972, 9001),
(9003, 9255), (9280, 9291), (9372, 9450), (9472, 10088), (10132, 10181),
(10183, 10214), (10224, 10627), (10649, 10712), (10716, 10748),
(10750, 11124), (11126, 11158), (11160, 11264), (11493, 11499),
(11904, 11930), (11931, 12020), (12032, 12246), (12272, 12284), 12292,
(12306, 12308), 12320, (12342, 12344), (12350, 12352), (12443, 12445),
(12688, 12690), (12694, 12704), (12736, 12772), (12800, 12831),
(12842, 12872), 12880, (12896, 12928), (12938, 12977), (12992, 13312),
(19904, 19968), (42128, 42183), (42752, 42775), (42784, 42786),
(42889, 42891), (43048, 43052), (43062, 43066), (43639, 43642), 43867,
64297, (64434, 64450), (65020, 65022), 65122, (65124, 65127), 65129,
65284, 65291, (65308, 65311), 65342, 65344, 65372, 65374, (65504, 65511),
(65512, 65519), (65532, 65534), (65847, 65856), (65913, 65930),
(65932, 65935), (65936, 65948), 65952, (66000, 66045), (67703, 67705),
68296, 71487, (73685, 73714), (92988, 92992), 92997, 113820,
(118784, 119030), (119040, 119079), (119081, 119141), (119146, 119149),
(119171, 119173), (119180, 119210), (119214, 119273), (119296, 119362),
119365, (119552, 119639), 120513, 120539, 120571, 120597, 120629, 120655,
120687, 120713, 120745, 120771, (120832, 121344), (121399, 121403),
(121453, 121461), (121462, 121476), (121477, 121479), 123215, 123647,
126124, 126128, 126254, (126704, 126706), (126976, 127020),
(127024, 127124), (127136, 127151), (127153, 127168), (127169, 127184),
(127185, 127222), (127248, 127341), (127344, 127405), (127462, 127491),
(127504, 127548), (127552, 127561), (127568, 127570), (127584, 127590),
(127744, 128726), (128736, 128749), (128752, 128763), (128768, 128884),
(128896, 128985), (128992, 129004), (129024, 129036), (129040, 129096),
(129104, 129114), (129120, 129160), (129168, 129198), (129280, 129292),
(129293, 129394), (129395, 129399), (129402, 129443), (129445, 129451),
(129454, 129483), (129485, 129620), (129632, 129646), (129648, 129652),
(129656, 129659), (129664, 129667), (129680, 129686)],
'Sc': [36, (162, 166), 1423, 1547, (2046, 2048), (2546, 2548), 2555, 2801,
3065, 3647, 6107, (8352, 8384), 43064, 65020, 65129, 65284,
(65504, 65506), (65509, 65511), (73693, 73697), 123647, 126128],
'Sk': [94, 96, 168, 175, 180, 184, (706, 710), (722, 736), (741, 748), 749,
(751, 768), 885, (900, 902), 8125, (8127, 8130), (8141, 8144),
(8157, 8160), (8173, 8176), (8189, 8191), (12443, 12445),
(42752, 42775), (42784, 42786), (42889, 42891), 43867, (64434, 64450),
65342, 65344, 65507, (127995, 128000)],
'Sm': [43, (60, 63), 124, 126, 172, 177, 215, 247, 1014, (1542, 1545), 8260,
8274, (8314, 8317), (8330, 8333), 8472, (8512, 8517), 8523,
(8592, 8597), (8602, 8604), 8608, 8611, 8614, 8622, (8654, 8656), 8658,
8660, (8692, 8960), (8992, 8994), 9084, (9115, 9140), (9180, 9186),
9655, 9665, (9720, 9728), 9839, (10176, 10181), (10183, 10214),
(10224, 10240), (10496, 10627), (10649, 10712), (10716, 10748),
(10750, 11008), (11056, 11077), (11079, 11085), 64297, 65122,
(65124, 65127), 65291, (65308, 65311), 65372, 65374, 65506,
(65513, 65517), 120513, 120539, 120571, 120597, 120629, 120655, 120687,
120713, 120745, 120771, (126704, 126706)],
'So': [166, 169, 174, 176, 1154, (1421, 1423), (1550, 1552), 1758, 1769,
(1789, 1791), 2038, 2554, 2928, (3059, 3065), 3066, 3199, 3407, 3449,
(3841, 3844), 3859, (3861, 3864), (3866, 3872), 3892, 3894, 3896,
(4030, 4038), (4039, 4045), (4046, 4048), (4053, 4057), (4254, 4256),
(5008, 5018), 5741, 6464, (6622, 6656), (7009, 7019), (7028, 7037),
(8448, 8450), (8451, 8455), (8456, 8458), 8468, (8470, 8472),
(8478, 8484), 8485, 8487, 8489, 8494, (8506, 8508), 8522, (8524, 8526),
8527, (8586, 8588), (8597, 8602), (8604, 8608), (8609, 8611),
(8612, 8614), (8615, 8622), (8623, 8654), (8656, 8658), 8659,
(8661, 8692), (8960, 8968), (8972, 8992), (8994, 9001), (9003, 9084),
(9085, 9115), (9140, 9180), (9186, 9255), (9280, 9291), (9372, 9450),
(9472, 9655), (9656, 9665), (9666, 9720), (9728, 9839), (9840, 10088),
(10132, 10176), (10240, 10496), (11008, 11056), (11077, 11079),
(11085, 11124), (11126, 11158), (11160, 11264), (11493, 11499),
(11904, 11930), (11931, 12020), (12032, 12246), (12272, 12284), 12292,
(12306, 12308), 12320, (12342, 12344), (12350, 12352), (12688, 12690),
(12694, 12704), (12736, 12772), (12800, 12831), (12842, 12872), 12880,
(12896, 12928), (12938, 12977), (12992, 13312), (19904, 19968),
(42128, 42183), (43048, 43052), (43062, 43064), 43065, (43639, 43642),
65021, 65508, 65512, (65517, 65519), (65532, 65534), (65847, 65856),
(65913, 65930), (65932, 65935), (65936, 65948), 65952, (66000, 66045),
(67703, 67705), 68296, 71487, (73685, 73693), (73697, 73714),
(92988, 92992), 92997, 113820, (118784, 119030), (119040, 119079),
(119081, 119141), (119146, 119149), (119171, 119173), (119180, 119210),
(119214, 119273), (119296, 119362), 119365, (119552, 119639),
(120832, 121344), (121399, 121403), (121453, 121461), (121462, 121476),
(121477, 121479), 123215, 126124, 126254, (126976, 127020),
(127024, 127124), (127136, 127151), (127153, 127168), (127169, 127184),
(127185, 127222), (127248, 127341), (127344, 127405), (127462, 127491),
(127504, 127548), (127552, 127561), (127568, 127570), (127584, 127590),
(127744, 127995), (128000, 128726), (128736, 128749), (128752, 128763),
(128768, 128884), (128896, 128985), (128992, 129004), (129024, 129036),
(129040, 129096), (129104, 129114), (129120, 129160), (129168, 129198),
(129280, 129292), (129293, 129394), (129395, 129399), (129402, 129443),
(129445, 129451), (129454, 129483), (129485, 129620), (129632, 129646),
(129648, 129652), (129656, 129659), (129664, 129667),
(129680, 129686)],
'Z': [32, 160, 5760, (8192, 8203), (8232, 8234), 8239, 8287, 12288],
'Zl': [8232],
'Zp': [8233],
'Zs': [32, 160, 5760, (8192, 8203), 8239, 8287, 12288]
}
|
from __future__ import print_function
__author__ = 'nickv'
travis_file_name = '.travis.yml'
# Branch pattern that should appear under branches.only after the update.
# Raw strings: the regex backslashes (\d, \.) must not be interpreted as
# Python escape sequences — non-raw '\d' is a DeprecationWarning today and
# a SyntaxError in future Python versions.  The values are unchanged.
new_stable_branch_line = r'/^stable\d+(\.\d+)?$/'
# Legacy branches.only entries that the new pattern replaces.
old_stable_branch_lines = {
    'stable4',
    'stable4.5',
    'stable5',
    'stable6',
    'stable7',
    'stable8',
    r'/^stable\d*$/'
}
def is_stable_branch_line(line):
    """Check whether *line* is a stable-branch entry of a YAML list.

    :param line: string - one line of .travis.yml, with or without a
        trailing newline
    :return: bool
    """
    if not line.startswith('    - '):
        return False
    # rstrip('\n') instead of slicing [6:-1]: the final line of a file may
    # have no trailing newline, and [:-1] would then chop a real character
    # off the branch name (e.g. 'stable8' -> 'stable').
    branch = line[6:].rstrip('\n')
    return branch == new_stable_branch_line or branch in old_stable_branch_lines
# Rewrite .travis.yml in place: replace the old list of stable branches
# under "branches: only:" with the single new_stable_branch_line pattern.
with open(travis_file_name, 'r') as travis_file:
    lines = travis_file.readlines()
foundBranches = False
foundBranchesOnly = False
updatedBranchesOnly = False
with open(travis_file_name, 'w') as travis_file:
    for line in lines:
        # First non-list line after the branches.only list ends the
        # section: insert the new pattern exactly once.
        # (line == '' can never be produced by readlines(); kept for parity
        # with the original guard.)
        if foundBranchesOnly and (line == '' or line[:6] != '    - '):
            travis_file.write('    - ' + new_stable_branch_line + '\n')
            updatedBranchesOnly = True
            foundBranchesOnly = False
        if line == 'branches:\n':
            foundBranches = True
        elif foundBranches and line == '  only:\n':
            foundBranchesOnly = True
        # Drop the old stable-branch entries; copy everything else.
        if not foundBranchesOnly or not is_stable_branch_line(line):
            travis_file.write(line)
    # Bug fix: if branches.only was the LAST section of the file, the loop
    # above never saw a terminating line — flush the new pattern now so we
    # don't lose it and wrongly report failure.
    if foundBranchesOnly:
        travis_file.write('    - ' + new_stable_branch_line + '\n')
        updatedBranchesOnly = True
    if lines[-1][-1] != '\n':
        travis_file.write('\n')
if updatedBranchesOnly:
    print('Updated branches.only in .travis.yml')
else:
    print('Could not find branches.only in .travis.yml')
    exit(1)
|
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2020 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from __future__ import unicode_literals
import re
from wtforms import ValidationError
from indico.util.string import is_valid_mail
from indico_payment_paypal import _
def validate_business(form, field):
    """Validate a PayPal business identifier.

    The value is accepted when it is either a valid e-mail address or a
    13-character alphanumeric PayPal business account ID; anything else
    raises ``ValidationError``.
    """
    value = field.data
    if is_valid_mail(value, multi=False):
        return
    if re.match(r'^[a-zA-Z0-9]{13}$', value):
        return
    raise ValidationError(_('Invalid email address / paypal ID'))
|
from enum import Enum
class RoundType(Enum):
    """String-valued kinds of rounds in the game simulation."""
    BLOODBATH = 'bloodbath'
    DAY = 'day'
    NIGHT = 'night'
    FEAST = 'feast'
    ARENA = 'arena'
    FALLEN = 'fallen'
class ErrorCode(Enum):
    """Numeric error codes returned by game-management operations.

    Values are plain small integers (written in decimal here; they are
    identical to the original hex literals).
    """
    NO_GAME = 0
    GAME_EXISTS = 1
    GAME_STARTED = 2
    GAME_FULL = 3
    PLAYER_EXISTS = 4
    CHAR_LIMIT = 5
    NOT_OWNER = 6
    INVALID_GROUP = 7
    NOT_ENOUGH_PLAYERS = 8
    GAME_NOT_STARTED = 9
    PLAYER_DOES_NOT_EXIST = 10
|
# -*-coding:utf-8 -*-
"""
Created on 2015-05-19
@author: Danny<manyunkai@hotmail.com>
DannyWork Project
"""
from __future__ import unicode_literals
from django import forms
from .models import ImageItem
class ImageItemForm(forms.ModelForm):
    """ModelForm for ImageItem objects (title, author, description, image)."""
    class Meta:
        model = ImageItem
        fields = ['title', 'author', 'description', 'image']
        widgets = {
            # Render the description as a wide, 5-row textarea.
            'description': forms.Textarea({'cols': '100', 'rows': '5'})
        }
|
# -*- coding: utf8 -*-fr
"""
Import all class needed
"""
__version__ = '1.0'
__authors__ = ['Guillaume Philippon <guillaume.philippon@lal.in2p3.fr>']
from itopapi.model.prototype import ItopapiPrototype, ItopapiUnimplementedMethod
from itopapi.model.rack import ItopapiRack
from itopapi.model.server import ItopapiServer
from itopapi.model.osFamily import ItopapiOSFamily
from itopapi.model.osVersion import ItopapiOSVersion
from itopapi.model.osLicence import ItopapiOSLicence
from itopapi.model.vlan import ItopapiVLAN
from itopapi.model.subnet import ItopapiSubnet
from itopapi.model.physicalInterface import ItopapiPhysicalInterface
from itopapi.model.virtualMachine import ItopapiVirtualMachine
from itopapi.model.webServer import ItopapiWebServer
from itopapi.model.webApplication import ItopapiWebApplication
from itopapi.model.service import ItopapiService
from itopapi.model.organization import ItopapiOrganization
from itopapi.model.location import ItopapiLocation
from itopapi.model.enclosure import ItopapiEnclosure
from itopapi.model.brand import ItopapiBrand
from itopapi.model.model import ItopapiModel
from itopapi.model.applicationSolution import ItopapiApplicationSolution
# TODO partial list of missing classes, with no particular order: Peripheral, MobilePhone, Printer, PC, Phone, IPPhone,
# Tablet, TapeLibrary, SANSwitchNAS, PDU, PowerSource, DatabaseSchema, OtherSoftware
|
import os
import stat
import shutil
import subprocess
import git
def __remove_readonly(fn, path, excinfo):
    """``shutil.rmtree`` onerror callback: clear the read-only bit and retry.

    Files under Dropbox/git checkouts can be marked read-only (notably on
    Windows), which makes the plain delete fail; chmod to writable and
    retry the failed operation.

    :param fn: the function that failed (as reported by shutil.rmtree)
    :param path: path that could not be removed
    :param excinfo: sys.exc_info() of the failure (unused)
    """
    if fn is os.rmdir:
        os.chmod(path, stat.S_IWRITE)
        os.rmdir(path)
    elif fn is os.remove or fn is os.unlink:
        # Bug fix: shutil.rmtree reports os.unlink (not os.remove) on some
        # platforms/versions; without this alias the retry was silently
        # skipped and rmtree re-raised.
        os.chmod(path, stat.S_IWRITE)
        os.remove(path)
def delete(details):
    """Remove both working copies of a project.

    Deletes the Dropbox checkout and then the git checkout named in
    *details*, but only when the payload ('contents') names the project
    itself.
    """
    if details['contents'] != details['project_name']:
        return
    # Same rmtree call for both trees; read-only files are handled by the
    # onerror callback.
    for path_key in ('dropbox_path', 'git_path'):
        shutil.rmtree(
            details[path_key],
            ignore_errors=False,
            onerror=__remove_readonly
        )
def ls(directory):
    """Return the entries of *directory* as reported by the ``ls`` command.

    The output is split on newlines, so the last element is always an
    empty string (``ls`` ends its output with a newline).

    :raises subprocess.CalledProcessError: if ``ls`` exits non-zero.
    """
    # Bug fix: universal_newlines=True makes check_output return text
    # instead of bytes, so the "\n" split below works on Python 3 (bytes
    # cannot be split with a str separator) as well as Python 2.
    output = subprocess.check_output(
        ['ls', directory],
        stderr=subprocess.STDOUT,
        universal_newlines=True
    )
    return output.split('\n')
def init(details):
    """Clone a git repo and mirror the checkout into Dropbox under incron watch.

    Clones details['contents'] (a git URL) into details['git_root'], copies
    the fresh checkout into details['dropbox_root'], then appends an incron
    watch so writes inside the Dropbox copy re-run this tool.
    """
    # Diff the directory listing before/after the clone to discover the
    # name of the newly created checkout directory.
    before = set(ls(details['git_root']))
    subprocess.check_output(
        ['git', 'clone', details['contents']],
        cwd=details['git_root'],
        stderr=subprocess.STDOUT
    )
    after = set(ls(details['git_root']))
    new = list(after - before)[0]
    git_path = os.path.join(details['git_root'], new)
    dropbox_path = os.path.join(details['dropbox_root'], new)
    os.mkdir(dropbox_path)
    # NOTE(review): git.__copy is not a public GitPython API — confirm this
    # helper exists on the 'git' module this project imports.
    git.__copy(git_path, dropbox_path)
    # Rebuild the user's incrontab: current entries plus a watch line for
    # the new Dropbox path, written to details['incron'] and re-installed.
    incrontab = subprocess.check_output(
        ['incrontab', '-l']
    )
    with open(details['incron'], 'w') as fd:
        fd.write(incrontab)
        fd.write('\n')
        fd.write(dropbox_path)
        fd.write(' IN_CLOSE_WRITE,IN_CREATE,IN_MOVED_TO python ')
        fd.write(details['mox_path'])
        fd.write(' ')
        fd.write(details['config_path'])
    subprocess.check_output(
        ['incrontab', details['incron']],
        stderr=subprocess.STDOUT
    )
|
##fixing index fastq.gz seq names to match RDP assembler's seq names###
import sys
import gzip
from Bio import SeqIO
# Rewrite index-read FASTQ headers so they match the sequence names the RDP
# assembler produces: keep only the part before the first space, append ':0'.
if len(sys.argv) != 3:
    print("USAGE: python fix_index_fastqgz_names.py XXX_I.fastq.gz FIXED_I.fastq")
    sys.exit()
# Bug fix: gzip.open rejects the legacy 'rU' mode on Python 3; 'rt' opens
# the stream in text mode (universal newlines are the default there).
# Context managers guarantee both handles are closed and flushed.
with gzip.open(sys.argv[1], 'rt') as handle, open(sys.argv[2], 'w') as output:
    for record in SeqIO.parse(handle, 'fastq'):
        record.id = record.id.split(' ')[0] + ":0"
        record.description = record.description.split(' ')[0] + ":0"
        record.name = record.name.strip() + ":0"
        SeqIO.write(record, output, "fastq")
|
'''
--- Part Two ---
The air conditioner comes online! Its cold air feels good for a while, but then the TEST alarms start to go off. Since the air conditioner can't vent its heat anywhere but back into the spacecraft, it's actually making the air inside the ship warmer.
Instead, you'll need to use the TEST to extend the thermal radiators. Fortunately, the diagnostic program (your puzzle input) is already equipped for this. Unfortunately, your Intcode computer is not.
Your computer is only missing a few opcodes:
Opcode 5 is jump-if-true: if the first parameter is non-zero, it sets the instruction pointer to the value from the second parameter. Otherwise, it does nothing.
Opcode 6 is jump-if-false: if the first parameter is zero, it sets the instruction pointer to the value from the second parameter. Otherwise, it does nothing.
Opcode 7 is less than: if the first parameter is less than the second parameter, it stores 1 in the position given by the third parameter. Otherwise, it stores 0.
Opcode 8 is equals: if the first parameter is equal to the second parameter, it stores 1 in the position given by the third parameter. Otherwise, it stores 0.
Like all instructions, these instructions need to support parameter modes as described above.
Normally, after an instruction is finished, the instruction pointer increases by the number of values in that instruction. However, if the instruction modifies the instruction pointer, that value is used and the instruction pointer is not automatically increased.
For example, here are several programs that take one input, compare it to the value 8, and then produce one output:
3,9,8,9,10,9,4,9,99,-1,8 - Using position mode, consider whether the input is equal to 8; output 1 (if it is) or 0 (if it is not).
3,9,7,9,10,9,4,9,99,-1,8 - Using position mode, consider whether the input is less than 8; output 1 (if it is) or 0 (if it is not).
3,3,1108,-1,8,3,4,3,99 - Using immediate mode, consider whether the input is equal to 8; output 1 (if it is) or 0 (if it is not).
3,3,1107,-1,8,3,4,3,99 - Using immediate mode, consider whether the input is less than 8; output 1 (if it is) or 0 (if it is not).
Here are some jump tests that take an input, then output 0 if the input was zero or 1 if the input was non-zero:
3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9 (using position mode)
3,3,1105,-1,9,1101,0,0,12,4,12,99,1 (using immediate mode)
Here's a larger example:
3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,
1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,
999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99
The above example program uses an input instruction to ask for a single number. The program will then output 999 if the input value is below 8, output 1000 if the input value is equal to 8, or output 1001 if the input value is greater than 8.
This time, when the TEST diagnostic program runs its input instruction to get the ID of the system to test, provide it 5, the ID for the ship's thermal radiator controller. This diagnostic test suite only outputs one number, the diagnostic code.
What is the diagnostic code for system ID 5?
'''
import sys
from collections import deque
class Virtual_Machine:
    """Intcode interpreter (Advent of Code 2019, day-5 instruction set).

    Executes the mutable program list in place.  Opcodes 1-8 and 99 are
    supported, with position (0) and immediate (1) parameter modes.
    Drive it with step() until is_running() returns False.
    """
    def __init__(self, int_code, program_alarm=False, noun=12, verb=2, debug=False):
        """Set up VM state over *int_code* (mutated in place while running).

        :param int_code: list of ints - the program/memory image
        :param program_alarm: if True, patch addresses 1 and 2 with
            noun/verb before execution (the day-2 "1202 program alarm")
        :param noun: value for address 1 when program_alarm is True
        :param verb: value for address 2 when program_alarm is True
        :param debug: enable trace output through __debug()
        """
        self.__debug_mode = debug
        if self.__debug_mode:
            print('Debug Mode...')
        self.__int_code = int_code
        self.__is_running = True
        # Instruction pointer: index of the next opcode in __int_code.
        self.__pc = 0
        self.__last_pc = self.__pc
        self.__step_counter = 0
        # Parameter modes of the current instruction
        # (True = immediate, False = position); refilled by __operate().
        self.__arg_mode_stack = deque()
        if program_alarm:
            self.__int_code[1] = noun
            self.__int_code[2] = verb
    def is_running(self):
        """Return True until opcode 99 (halt) has executed."""
        return self.__is_running
    def step(self):
        """Decode and execute one instruction, then advance the pc."""
        self.__last_pc = self.__pc
        # Argument reads/writes advance the pc themselves; the handler's
        # return value is only the remaining advance (1 normally, 0 after
        # a taken jump that already retargeted the pc).
        operate_length = self.__operate()
        self.__pc += operate_length
        self.__step_counter += 1
    def first_position(self):
        """Return memory[0], the conventional program result."""
        first_pos = 0
        return self.__int_code[first_pos]
    def print_debug_info(self):
        """Dump VM state through the debug channel (no-op unless debugging)."""
        self.__debug(' IntCode\n {}'.format(self.__int_code))
        self.__debug('Is Running {}'.format(self.__is_running))
        self.__debug(' PC {}'.format(self.__pc))
        self.__debug(' LAST PC {}'.format(self.__last_pc))
        self.__debug(' Steps {}'.format(self.__step_counter))
        self.__debug(' arg modes {}'.format(self.__arg_mode_stack))
    def __operate(self):
        """Decode the instruction at pc and dispatch to its handler.

        :return: extra pc advance to apply after the handler (see step())
        """
        self.__arg_mode_stack.clear()
        # Low two digits are the opcode; the remaining digits hold the
        # per-argument parameter modes, least significant digit first.
        opcode = self.__int_code[self.__pc] % 100
        modes = self.__int_code[self.__pc] // 100
        for arg in range(3):
            arg_mode = modes % 10
            modes //= 10
            # appendleft() here + pop() in __read_arg/__write_arg yields
            # the first argument's mode first.
            self.__arg_mode_stack.appendleft(bool(arg_mode))
        self.__debug('O({})'.format(opcode))
        return \
            {
                1: self.__add,
                2: self.__multiple,
                3: self.__input,
                4: self.__print,
                5: self.__jmp_if_true,
                6: self.__jmp_if_false,
                7: self.__less_than,
                8: self.__equals,
                99: self.__exit
            }[opcode]()
    def __add(self):
        """Opcode 1: dst = arg1 + arg2."""
        arg1 = self.__read_arg()
        arg2 = self.__read_arg()
        self.__write_arg(arg1 + arg2)
        return 1
    def __multiple(self):
        """Opcode 2: dst = arg1 * arg2."""
        arg1 = self.__read_arg()
        arg2 = self.__read_arg()
        self.__write_arg(arg1 * arg2)
        return 1
    def __input(self):
        """Opcode 3: read an int from stdin and store it at dst."""
        val = 0
        try:
            val = int(input())
        except Exception as e:
            self.__debug('Reading input problem. Val={} ErrorMessage={}'.format(val, e))
            pass
        # NOTE(review): the [0, 99] restriction fits this puzzle's system
        # IDs but is narrower than general Intcode input — confirm intended.
        if not (0 <= val <= 99):
            raise Exception('Passed value \'{}\' is not in range [{}, {}]', val, 0, 99)
        self.__write_arg(val)
        return 1
    def __jmp_if_true(self):
        """Opcode 5: if arg1 != 0, jump to arg2."""
        arg1 = self.__read_arg()
        jump_pc = self.__read_arg()
        if arg1 != 0:
            self.__pc = jump_pc
            # pc already retargeted; suppress the post-handler advance.
            return 0
        return 1
    def __jmp_if_false(self):
        """Opcode 6: if arg1 == 0, jump to arg2."""
        arg1 = self.__read_arg()
        jump_pc = self.__read_arg()
        if arg1 == 0:
            self.__pc = jump_pc
            return 0
        return 1
    def __less_than(self):
        """Opcode 7: dst = 1 if arg1 < arg2 else 0."""
        arg1 = self.__read_arg()
        arg2 = self.__read_arg()
        if arg1 < arg2:
            self.__write_arg(1)
        else:
            self.__write_arg(0)
        return 1
    def __equals(self):
        """Opcode 8: dst = 1 if arg1 == arg2 else 0."""
        arg1 = self.__read_arg()
        arg2 = self.__read_arg()
        if arg1 == arg2:
            self.__write_arg(1)
        else:
            self.__write_arg(0)
        return 1
    def __print(self):
        """Opcode 4: write arg1 to stdout (no trailing newline)."""
        arg1 = self.__read_arg()
        print(arg1, end='')
        return 1
    def __exit(self):
        """Opcode 99: halt the machine."""
        self.__is_running = False
        return 1
    # Helpers
    def __read_arg(self):
        """Advance the pc and fetch the next argument honouring its mode."""
        self.__pc += 1
        if self.__pc >= len(self.__int_code):
            raise Exception('Segmentation fault')
        ret = 0
        if self.__arg_mode_stack.pop():
            # Immediate mode: the cell itself is the value.
            ret = self.__int_code[self.__pc]
        else:
            # Position mode: the cell is an address to dereference.
            ret = self.__int_code[self.__int_code[self.__pc]]
        return ret
    def __write_arg(self, val):
        """Advance the pc and store *val* at the next argument's target."""
        self.__pc += 1
        if self.__pc >= len(self.__int_code):
            raise Exception('Segmentation fault')
        if self.__arg_mode_stack.pop():
            self.__int_code[self.__pc] = val
        else:
            self.__int_code[self.__int_code[self.__pc]] = val
    def __debug(self, str):
        """Print *str* with a 'D:' prefix when debug mode is on."""
        if self.__debug_mode:
            print('D:{}'.format(str))
def parse_file(file_path: str):
    """Read a comma-separated Intcode program from *file_path*.

    Every line is split on ',' with its newline stripped, each token is
    parsed as an int, and all lines are flattened into one program list.
    """
    with open(file_path, 'r') as source:
        return [
            int(token)
            for line in source
            for token in line.replace('\n', '').split(',')
        ]
def main(argv):
    """Load the Intcode program named in argv[1] and run it to completion."""
    machine = Virtual_Machine(parse_file(argv[1]), debug=False)
    while machine.is_running():
        try:
            machine.step()
        except Exception as err:
            # Surface the failure and stop stepping; debug info follows.
            print('E:', err)
            break
    machine.print_debug_info()
    print('First Position Value = {}'.format(machine.first_position()))
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
# O(n) time | O(n) space
def minHeightBst(array):
    # Build a minimal-height BST from the sorted input by always inserting
    # the middle element of the current slice first (see helper below).
    return minHeightBstHelper(array, 0, len(array)-1, None)
def minHeightBstHelper(array, startIdx, endIdx, root):
    """Insert the middle of array[startIdx:endIdx+1] beneath *root*.

    Recurses on both halves of the slice so the resulting tree has minimal
    height; returns the node created for this slice (the overall root when
    called with root=None).
    """
    if startIdx > endIdx:
        return None
    midIdx = (startIdx + endIdx) // 2
    newNode = BST(array[midIdx])
    if root is None:
        root = newNode
    elif array[midIdx] < root.value:
        root.left = newNode
        root = newNode
    else:
        root.right = newNode
        root = newNode
    minHeightBstHelper(array, startIdx, midIdx - 1, root)
    minHeightBstHelper(array, midIdx + 1, endIdx, root)
    return root
class BST:
    """Minimal binary search tree node: a value and two child links."""

    def __init__(self, value):
        self.value = value  # payload stored at this node
        self.left = None    # left child (values < self.value)
        self.right = None   # right child (values >= self.value)

    def __repr__(self):
        # Debug-friendly representation; children are omitted so printing
        # a node stays O(1) even inside a large tree.
        return 'BST({!r})'.format(self.value)
|
# users/views.py
# Django modules
from django.shortcuts import render
from django.views.generic import CreateView
from django.contrib.auth.views import LoginView, LogoutView
# Locals
from users.forms import RegisterForm
# Create your views here.
# Class:RegisterView
class RegisterView(CreateView):
    """Sign-up view: renders RegisterForm and creates the new user."""
    # The three CreateView settings: template to render, form to validate,
    # and where to redirect after a successful registration.
    template_name = 'users/register.html' #1
    form_class = RegisterForm #2
    success_url = '/' #3
# Class:UserLoginView
class UserLoginView(LoginView):
    """Login view backed by Django's built-in LoginView."""
    template_name = 'users/login.html'
# Class:UserLogoutView
class UserLogoutView(LogoutView):
    """Logout view backed by Django's built-in LogoutView."""
    # NOTE(review): reuses the login template — confirm this is intentional
    # rather than a copy-paste leftover.
    template_name = 'users/login.html'
"""
Azimuth / elevation <==> Right ascension, declination
"""
from __future__ import annotations
from datetime import datetime
from .vallado import azel2radec as vazel2radec, radec2azel as vradec2azel
from .timeconv import str2dt # astropy can't handle xarray times (yet)
try:
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS
except ImportError:
pass
__all__ = ["radec2azel", "azel2radec"]
def azel2radec(
    az_deg: float,
    el_deg: float,
    lat_deg: float,
    lon_deg: float,
    time: datetime,
) -> tuple[float, float]:
    """
    viewing angle (az, el) to sky coordinates (ra, dec)

    Parameters
    ----------
    az_deg : float
        azimuth [degrees clockwise from North]
    el_deg : float
        elevation [degrees above horizon (neglecting aberration)]
    lat_deg : float
        observer latitude [-90, 90]
    lon_deg : float
        observer longitude [-180, 180] (degrees)
    time : datetime.datetime or str
        time of observation

    Returns
    -------
    ra_deg : float
        ecliptic right ascension (degrees)
    dec_deg : float
        ecliptic declination (degrees)
    """
    try:
        obs = EarthLocation(lat=lat_deg * u.deg, lon=lon_deg * u.deg)
        direc = AltAz(
            location=obs, obstime=Time(str2dt(time)), az=az_deg * u.deg, alt=el_deg * u.deg
        )
        sky = SkyCoord(direc.transform_to(ICRS()))
        return sky.ra.deg, sky.dec.deg
    except NameError:
        # astropy is optional: when its import failed at module load, the
        # names above are undefined and we fall back to the pure-Python
        # Vallado implementation.
        return vazel2radec(az_deg, el_deg, lat_deg, lon_deg, time)
def radec2azel(
    ra_deg: float,
    dec_deg: float,
    lat_deg: float,
    lon_deg: float,
    time: datetime,
) -> tuple[float, float]:
    """
    sky coordinates (ra, dec) to viewing angle (az, el)

    Parameters
    ----------
    ra_deg : float
        ecliptic right ascension (degrees)
    dec_deg : float
        ecliptic declination (degrees)
    lat_deg : float
        observer latitude [-90, 90]
    lon_deg : float
        observer longitude [-180, 180] (degrees)
    time : datetime.datetime or str
        time of observation

    Returns
    -------
    az_deg : float
        azimuth [degrees clockwise from North]
    el_deg : float
        elevation [degrees above horizon (neglecting aberration)]
    """
    try:
        obs = EarthLocation(lat=lat_deg * u.deg, lon=lon_deg * u.deg)
        points = SkyCoord(Angle(ra_deg, unit=u.deg), Angle(dec_deg, unit=u.deg), equinox="J2000.0")
        altaz = points.transform_to(AltAz(location=obs, obstime=Time(str2dt(time))))
        return altaz.az.degree, altaz.alt.degree
    except NameError:
        # astropy is optional: when its import failed at module load, the
        # names above are undefined and we fall back to the pure-Python
        # Vallado implementation.
        return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)
|
# Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from neurst.utils.configurable import extract_constructor_params
from neurst_pt.layers.common_layers import MultiHeadDenseLayer
class MultiHeadAttention(nn.Module):
    """ Class of multi-head scaled-dot-product attention with input/output
    transformations. """
    def __init__(self,
                 input_depth,
                 num_heads,
                 num_units,
                 attention_key_depth=None,
                 attention_value_depth=None,
                 output_depth=None,
                 attention_dropout_rate=0.1,
                 attention_type="dot_product"):
        """ Initializes the multi head attention layer.
        Args:
            input_depth: The dimension of the input tensor.
            num_heads: A int scalar, the number of heads.
            num_units: A int scalar, the default units if other `depth` is
                not provided.
            attention_key_depth: A int scalar, the dimension for projected
                attention keys. If not provided, then use `num_units` as default.
            attention_value_depth: A int scalar, the dimension for projected
                attention values. If not provided, then use `num_units` as default.
            output_depth: A int scalar, the dimension for projected
                outputs. If not provided, then use `num_units` as default.
            attention_dropout_rate: A float scalar, the dropout rate for attention weight.
            attention_type: A string indicating the attention type.
        """
        # Record the constructor arguments so the layer can be re-created
        # from a serialized configuration.
        self._params = extract_constructor_params(locals(), verbose=False)
        super(MultiHeadAttention, self).__init__()
        self._input_depth = input_depth
        self._num_heads = num_heads
        self._num_units = num_units
        # Any depth not provided falls back to `num_units`.
        self._attention_key_depth = attention_key_depth or num_units
        self._attention_value_depth = attention_value_depth or num_units
        self._output_depth = output_depth or num_units
        self._attention_dropout_rate = attention_dropout_rate
        self._attention_type = attention_type
        # Each head gets an equal slice, so the projected key/value depths
        # must split evenly across the heads.
        if self._attention_key_depth % self._num_heads != 0:
            raise ValueError("query depth ({}) must be divisible by the number of "
                             "attention heads ({}).".format(self._attention_key_depth, self._num_heads))
        if self._attention_value_depth % self._num_heads != 0:
            raise ValueError("value depth ({}) must be divisible by the number of "
                             "attention heads ({}).".format(self._attention_value_depth, self._num_heads))
        # pre-create output transform layer
        self._output_transform_layer = MultiHeadDenseLayer(
            input_size=input_depth, output_units=self._output_depth,
            num_heads=self._num_heads, is_output_transform=True,
            use_bias=True)
        # Q/K/V projections are built by a hook so subclasses (e.g.
        # self-attention) can fuse them differently.
        self._build_qkv_transform_layer()
    def _build_qkv_transform_layer(self):
        """ Builds the layer.
        Layers for linearly projecting the queries, keys, and values."""
        # Queries get their own projection; keys and values share one layer
        # that emits both projections from the memory tensor.
        self._q_transform_layer = MultiHeadDenseLayer(
            input_size=self._input_depth, output_units=self._attention_key_depth,
            num_heads=self._num_heads, is_output_transform=False,
            use_bias=True)
        self._kv_transform_layer = MultiHeadDenseLayer(
            input_size=self._input_depth, is_output_transform=False,
            output_units=[self._attention_key_depth, self._attention_value_depth],
            num_heads=self._num_heads, use_bias=True)
    def compute_qkv(self, query, memory, cache, decode_loop_step=None):
        """ Computes linear transformations of query, keys and values.
        Args:
            query: A tensor with shape [batch_size, length_q, query_depth].
            memory: A tensor with shape [batch_size, length_m, memory_depth].
            cache: A dict, used during prediction.
            decode_loop_step: An integer, step number of the decoding loop. Used only
                for autoregressive inference with static-shape cache.
        Returns: A tuple `(query_transformed, key_transformed, memory_transformed)`.
        """
        # Cross-attention does not use the cache arguments here; they are
        # accepted only to keep the signature shared with subclasses.
        _ = cache
        _ = decode_loop_step
        # [batch_size, length_q/k/v, num_heads, num_units_per_head]
        q = self._q_transform_layer(query)
        k, v = self._kv_transform_layer(memory)
        return q, k, v
    def att_fn(self, q, k, bias):
        """ Computes attention weights according to attention_type.
        Args:
            q: Attention query tensor with shape
                [batch_size, length_q, num_heads, att_key_depth / num_heads]
            k: Attention query tensor with shape
                [batch_size, length_k, num_heads, att_key_depth / num_heads]
            bias: The bias tensor with shape [batch_size, length_q, length_k]
                or [batch_size, 1, length_q, length_k]
        Returns: The attention weight with shape
            [batch_size, num_heads, length_q, length_k]
        """
        if self._attention_type == "dot_product":
            # B: batch_size
            # T: length_k
            # F: length_q
            # N: num_heads
            # H: depth per head
            # logits: [batch_size, num_heads, length_q, length_k]
            logits = torch.einsum("btnh,bfnh->bnft", k, q)
            if bias is not None:
                # A 2-dim bias (presumably [batch, length_k] padding mask --
                # confirm against callers) is broadcast over heads and query
                # positions; 4-dim biases are used as-is.
                if bias.ndim == 2:
                    bias = bias.unsqueeze(1).unsqueeze(1)
                elif bias.ndim != 4:
                    raise ValueError("bias tensor with {}-dim is not valid".format(bias.ndim))
                logits += bias
            # Note that softmax internally performs math operations using float32
            # for numeric stability. When training with float16, we keep the input
            # and output in float16 for better performance.
            weights = F.softmax(logits, -1)
        else:
            raise NotImplementedError(
                "att_fn for \"{}\" not implemented.".format(self._attention_type))
        return weights
    def forward(self,
                query,
                memory,
                memory_bias=None,
                cache=None,
                is_training=True,
                decode_loop_step=None):
        """ Apply attention mechanism to query and memory.
        Args:
            query: A tensor with shape [batch_size, length_q, query_depth]
                or [batch_size, query_depth].
            memory: A tensor with shape [batch_size, length_m, memory_depth].
            memory_bias: A tensor with shape [batch_size, length_m],
                the attention bias that will be added to the result of the dot product.
            cache: (Used during prediction) A dictionary with tensors containing
                results of previous attentions. The dictionary must have the items:
                {"keys": tensor with shape [batch_size, i, heads, dim_per_head],
                 "values": tensor with shape [batch_size, i, heads, dim_per_head]}
                where i is the current decoded length.
            is_training: A bool, whether in training mode or not.
            decode_loop_step: An integer, step number of the decoding loop. Used only
                for autoregressive inference with static-shape cache.
        Returns:
            Attention layer output with shape [batch_size, length_q, output_depth]
        """
        query_is_2d = False
        if query.ndim == 2:
            # for using MultiHeadAttention in RNN-based decoders
            query_is_2d = True
            query = query.unsqueeze(1)
        # linear transformation of q, k, v
        q, k, v = self.compute_qkv(query, memory, cache, decode_loop_step)
        # Scale query to prevent the dot product between query and key from growing
        # (the 1/sqrt(depth_per_head) factor, applied in place).
        q *= (self._attention_key_depth // self._num_heads) ** (-0.5)
        # compute attention weight, [batch_size, num_heads, length_q, length_k]
        weights = self.att_fn(q, k, memory_bias)
        # dropout on the attention weights is active only during training
        weights = F.dropout(weights, p=self._attention_dropout_rate, training=is_training)
        # sum over attention values
        # N: num heads
        # F: length_q
        # T: length_k
        # H: num units per head
        # attention output: [batch_size, length_q, num_heads, num_units_per_head]
        attention_output = torch.einsum("bnft,btnh->bfnh", weights, v)
        # Run the outputs through another linear projection layer. Recombining heads
        # is automatically done --> [batch_size, length_q, num_units]
        attention_output = self._output_transform_layer(attention_output)
        if query_is_2d:
            # attention output: [batch_size, depth_value]
            attention_output = attention_output.squeeze(1)
        return attention_output
class MultiHeadSelfAttention(MultiHeadAttention):
    """ Class of multi-head scaled-dot-product self-attention with input/output
    transformations. """
    def _build_qkv_transform_layer(self):
        # A single fused projection emits Q, K and V from the same input,
        # instead of the separate q / kv layers of the parent class.
        self._qkv_transform_layer = MultiHeadDenseLayer(
            input_size=self._input_depth,
            output_units=[self._attention_key_depth,
                          self._attention_key_depth,
                          self._attention_value_depth],
            num_heads=self._num_heads,
            is_output_transform=False,
            use_bias=True)
    def forward(self, query, bias=None, cache=None, is_training=True, decode_loop_step=None):
        """ Builds the self-attention context. """
        # Self-attention: the query attends over itself.
        return super(MultiHeadSelfAttention, self).forward(
            query=query,
            memory=query,
            memory_bias=bias,
            cache=cache,
            is_training=is_training,
            decode_loop_step=decode_loop_step)
    def compute_qkv(self, query, memory, cache, decode_loop_step=None):
        """ Computes linear transformations of query, keys and values, especially
        for self-attention in transformer.
        Args:
            query: Attention query tensor with shape [batch_size, length_q, channels_query],
                or [batch_size, 1, channels_query] for decoder self-attention
            memory: Unused.
            cache: Used during prediction.
            decode_loop_step: An integer, step number of the decoding loop. Used only
                for autoregressive inference with static-shape cache.
        Returns:
            A tuple `(query_transformed, key_transformed, memory_transformed)`.
        """
        _ = memory
        q, k, v = self._qkv_transform_layer(query)
        if cache is not None:
            # for self-attention in transformer decoder when mode=INFER
            if decode_loop_step is None:
                # Growing cache: append this step's K/V along the time axis
                # and store the concatenation back into the cache.
                k = torch.cat([cache["keys"], k], dim=1)
                v = torch.cat([cache["values"], v], dim=1)
                cache["keys"] = k
                cache["values"] = v
            else: # for dynamic shape
                # Static-shape cache: write this step's K/V into slot
                # `decode_loop_step` via a one-hot mask (the cache slot is
                # assumed zero-initialized), then slice the filled prefix.
                def _insert_curr(_cache, _new_val):
                    size = _cache.size()[1]
                    indices = torch.reshape(F.one_hot(decode_loop_step, size).to(_new_val.dtype),
                                            [1, size, 1, 1])
                    new_val = _cache + _new_val * indices
                    return new_val
                cache["keys"] = _insert_curr(cache["keys"], k)
                cache["values"] = _insert_curr(cache["values"], v)
                k = cache["keys"][:, :decode_loop_step + 1]
                v = cache["values"][:, :decode_loop_step + 1]
        return q, k, v
|
from setuptools import find_packages, setup
from city_scrapers_core import __version__
# Read the long description from the README so PyPI can render it.
# Fix: specify the encoding explicitly -- without it, reading depends on the
# platform's default locale and can fail on non-UTF-8 systems (e.g. Windows).
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="city-scrapers-core",
    version=__version__,
    license="MIT",
    author="Pat Sier",
    author_email="pat@citybureau.org",
    description="Core functionality for City Scrapers projects",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/City-Bureau/city-scrapers-core",
    packages=find_packages(),
    package_data={"": ["*"], "city_scrapers_core": ["templates/*"]},
    install_requires=["jsonschema>=3.0.0a5", "pytz", "requests", "scrapy"],
    tests_require=["flake8", "pytest", "isort"],
    extras_require={
        "aws": ["boto3"],
        "azure": ["azure-storage-blob>=12"],
        "gcs": ["google-cloud-storage"],
    },
    python_requires=">=3.6,<4.0",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Framework :: Scrapy",
    ],
)
|
from conans import ConanFile, tools
import os
import shutil
from distutils.dir_util import copy_tree
class CrashpadConan(ConanFile):
    """Conan recipe that fetches Google Crashpad via Chromium's depot_tools,
    builds it with gn/ninja, and packages the resulting binaries."""
    name = "crashpad"
    version = "1.0"
    # shorten cache paths (Windows MAX_PATH mitigation)
    short_paths = True
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}
    # shared helper recipe providing packaging/arch utilities
    python_requires = "nla_pkg_helper/1.0"
    python_requires_extend = "nla_pkg_helper.ConanPackageHelper"
    pkg_helper = None
    out_dir = "out"
    release_dir = "Release"
    depot_tools_dep = "depot_tools"
    def init(self):
        # Resolve the helper class and evict stale cache entries for this
        # package on the detected host os/arch.
        self.pkg_helper = self.python_requires["nla_pkg_helper"].module.ConanPackageHelper
        self.pkg_helper.clean_conan_cache_by_detected_os_host_and_arch(self, self.name, self.version)
    def build_requirements(self):
        # On macOS depot_tools comes from a conan package; on Windows it is
        # cloned by hand in source() instead.
        if tools.os_info.is_macos:
            self.build_requires("depot_tools/cci.20201009")
    def config_options(self):
        if self.settings.os == "Windows":
            # fPIC has no meaning for MSVC targets
            del self.options.fPIC
    def source(self):
        if tools.os_info.is_windows:
            # Clone depot_tools, put it on PATH, then let `fetch` pull
            # crashpad plus its gclient dependencies.
            tools.mkdir(self.depot_tools_dep)
            with tools.chdir(self.depot_tools_dep):
                git = tools.Git()
                git.clone("https://chromium.googlesource.com/chromium/tools/depot_tools.git")
            with tools.environment_append({"PATH": [os.path.join(self.source_folder, self.depot_tools_dep)]}):
                tools.mkdir(self.name)
                with tools.chdir(self.name):
                    self.run(f"fetch {self.name}")
        elif tools.os_info.is_macos:
            # depot_tools is provided by the build requirement's environment
            self.run(f"fetch {self.name}", run_environment=True)
    def build(self):
        # Map the helper's arch-variation names onto gn's target_cpu values.
        arch_map = {self.pkg_helper.ArchVariations.WIN10_X86_64_VARIATION.value: "x64",
                    self.pkg_helper.ArchVariations.MACOSX_ARM64_VARIATION.value: "arm64",
                    self.pkg_helper.ArchVariations.MACOSX_X86_64_VARIATION.value: "x64"}
        if tools.os_info.is_windows:
            with tools.chdir(os.path.join(self.build_folder, self.depot_tools_dep, self.name)):
                with tools.chdir(os.path.join(self.build_folder, self.depot_tools_dep, self.name, self.name)):
                    with tools.environment_append({"PATH": [os.path.join(self.build_folder, self.depot_tools_dep)]}):
                        output_dir = os.path.join(self.build_folder, self.depot_tools_dep, self.name, self.name,
                                                  self.out_dir, self.release_dir)
                        # Two-pass build: generate + build with defaults,
                        # then append target_cpu to args.gn and rebuild.
                        # NOTE(review): the first ninja run looks redundant;
                        # confirm before simplifying.
                        self.run(f"gn gen {output_dir}")
                        self.run(f"ninja -C {output_dir}")
                        tools.save(os.path.join(output_dir, "args.gn"),
                                   'target_cpu=\"%s\"' % arch_map[self.pkg_helper.get_bin_variation(self)], append=True)
                        self.run(f"ninja -C {output_dir}")
        elif tools.os_info.is_macos:
            with tools.chdir(self.name):
                with tools.environment_append({"PATH": [self.deps_env_info["depot_tools"].DEPOT_TOOLS_PATH[0]]}):
                    output_dir = os.path.join(self.out_dir, self.release_dir)
                    # Same two-pass scheme as the Windows branch above.
                    self.run(f"gn gen {output_dir}")
                    self.run(f"ninja -C {output_dir}")
                    tools.save(os.path.join(self.build_folder, self.name, output_dir, "args.gn"),
                               'target_cpu=\"%s\"' % arch_map[self.pkg_helper.get_bin_variation(self)], append=True)
                    self.run(f"ninja -C {output_dir}")
    def package(self):
        # The build tree sits in a different place per platform.
        src_bins = os.path.join(self.build_folder, self.depot_tools_dep, self.name, self.name, self.out_dir,
                                self.release_dir) if tools.os_info.is_windows else os.path.join(self.build_folder,
                                                                                                self.name, self.out_dir,
                                                                                                self.release_dir)
        self.pkg_helper.package_all_bins_to_bin_variation_dir(self, bin_src=src_bins)
        bin_variation = self.pkg_helper.get_bin_variation(self)
        if tools.os_info.is_macos:
            # Copy tool binaries only: sources, libraries and ninja/gn build
            # artifacts are excluded (libs are handled by the helper above).
            self.copy("*", dst=os.path.join(bin_variation, "bin"),
                      src=src_bins,
                      excludes=("*.h", "*.lib", "*.dll", "*.exe", "*.so", "*.dylib", "*.a",
                                "*.cpp", "*.c", "*.html", "*.js", "*.ninja", "*.gyp", "*.cc",
                                "*.py", "*.abilist", "*.inc", "*.exp", "*.m", "*.rst", "*.o", "*.stamp", "*.gn", "*.d",
                                "*.ninja_deps", "*.ninja_log"),
                      keep_path=False)
            self._rename_static_libs_with_prefix("libcrashpad")
            self.pkg_helper.build_macosx_universal_bins(self)
    def _rename_static_libs_with_prefix(self, new_lib_prefix):
        # Rename lib<x>.a -> libcrashpad<x>.a (only the first "lib" is
        # replaced) to namespace the packaged static libraries.
        bin_variation = self.pkg_helper.get_bin_variation(self)
        libs_path = os.path.join(self.package_folder, bin_variation, "lib")
        if libs_path and os.path.exists(libs_path):
            for root, dirs, files in os.walk(os.path.join(libs_path)):
                for lib_file in files:
                    if lib_file.startswith("lib"):
                        tools.rename(os.path.join(libs_path, lib_file),
                                     os.path.join(libs_path, lib_file.replace("lib", new_lib_prefix, 1)))
|
#!/usr/bin/env python2
# coding: utf-8
import errno
import os
import re
import string
import types
import subprocess32
from .colored_string import ColoredString
# Sequence types treated as "list-like" throughout this module (Python 2).
listtype = (types.TupleType, types.ListType)
# ASCII control characters 0x00-0x1f, scrubbed from cell text for display.
invisible_chars = ''.join(map(unichr, range(0, 32)))
invisible_chars_re = re.compile('[%s]' % re.escape(invisible_chars))
def page(lines, max_lines=10, control_char=True, pager=('less',)):
    # Display `lines` through an external pager when there are more than
    # `max_lines` of them; otherwise write them straight to stdout (fd 1).
    if len(lines) > max_lines:
        pp = {'stdin': subprocess32.PIPE,
              'stdout': None,
              'stderr': None}
        cmd_pager = list(pager)
        if control_char:
            if pager == ('less', ):
                # -r makes `less` pass raw control characters through,
                # so ANSI color sequences render instead of showing as ^[..
                cmd_pager += ['-r']
        subproc = subprocess32.Popen(cmd_pager,
                                     close_fds=True,
                                     cwd='./',
                                     **pp)
        try:
            out, err = subproc.communicate('\n'.join(lines))
        except IOError as e:
            # py2-style indexing: e[0] is the errno.  A broken pipe just
            # means the user quit the pager early -- not an error.
            if e[0] == errno.EPIPE:
                pass
            else:
                raise
        subproc.wait()
    else:
        os.write(1, '\n'.join(lines) + "\n")
def _findquote(line, quote):
    """Locate the first quoted span in `line`.

    `quote` is a string of accepted quote characters; a backslash escapes
    the character after it.  Returns `(start, end, escapes)` where `start`
    and `end` are the indexes of the opening and closing quote *measured in
    the string with the escape backslashes removed* (-1 when not found),
    and `escapes` lists the raw indexes of the backslashes seen."""
    if len(quote) == 0:
        return -1, -1, []
    i = 0
    n = len(line)
    escape = []
    while i < n:
        if line[i] == '\\':
            # record the escape and skip the escaped character
            escape.append(i)
            i += 2
            continue
        if line[i] in quote:
            # opening quote; its reported index shifts left by the number
            # of escape backslashes seen before it
            quote_s = i - len(escape)
            j = i
            i += 1
            # scan forward for the matching close quote of the same kind
            while i < n and line[i] != line[j]:
                if line[i] == '\\':
                    escape.append(i)
                    i += 2
                    continue
                i += 1
            if i < n:
                quote_e = i - len(escape)
                return quote_s, quote_e, escape
            else:
                # unterminated quote: report the opening position only
                return quote_s, -1, escape
        i += 1
    return -1, -1, escape
def parse_colon_kvs(data):
    """Parse a string of whitespace-separated, possibly quoted `k:v` pairs
    into a dict (the value keeps any further colons intact)."""
    tokens = tokenize(data, quote='"\'')
    for tok in tokens:
        if ':' not in tok:
            raise ValueError('invalid arguments, arguments'
                             'need key-val like: "k:v"')
    # split on the first colon only; later duplicates of a key win
    return dict(tok.split(':', 1) for tok in tokens)
def tokenize(line, sep=None, quote='"\'', preserve=False):
if sep == quote:
raise ValueError, 'diffrent sep and quote is required'
if sep is None:
if len(line) == 0:
return []
line = line.strip()
rst = ['']
n = len(line)
i = 0
while i < n:
quote_s, quote_e, escape = _findquote(line[i:], quote)
if len(escape) > 0:
lines = []
x = 0
for e in escape:
lines.append(line[x:i + e])
x = i + e + 1
lines.append(line[x:])
line = ''.join(lines)
n = len(line)
if quote_s < 0:
sub = n
else:
sub = i + quote_s
if i < sub:
sub_rst = line[i:sub].split(sep)
if sep is None:
if line[sub - 1] in string.whitespace:
sub_rst.append('')
if line[i] in string.whitespace:
sub_rst.insert(0, '')
head = rst.pop()
sub_rst[0] = head + sub_rst[0]
rst += sub_rst
if quote_s < 0:
break
# discard incomplete
# 'a b"c' -> ['a']
if quote_e < 0:
rst.pop()
break
head = rst.pop()
if preserve:
head += line[i + quote_s:i + quote_e + 1]
else:
head += line[i + quote_s + 1:i + quote_e]
rst.append(head)
i += quote_e + 1
return rst
def line_pad(linestr, padding=''):
    """Prefix every line of `linestr` with `padding` -- either a literal
    string or a callable mapping each line to its prefix -- and re-join
    the lines with newlines."""
    rows = linestr.split("\n")
    if callable(padding):
        rows = [padding(row) + row for row in rows]
    elif type(padding) in types.StringTypes:
        rows = [padding + row for row in rows]
    return "\n".join(rows)
def format_line(items, sep=' ', aligns=''):
    '''
    format a line with multi-row columns.
    items = [ 'name:',
              [ 'John',
                'j is my nick' ],
              [ 'age:' ],
              [ 26, ],
              [ 'experience:' ],
              [ '2000 THU',
                '2006 sina',
                '2010 other' ],
    ]
    format_line(items, sep=' | ', aligns='llllll')
    outputs:
        name: | John         | age: | 26 | experience: | 2000 THU
              | j is my nick |      |    |             | 2006 sina
              |              |      |    |             | 2010 other
    '''
    # normalize aligns to one flag per column; 'l' pads on the right,
    # anything else right-aligns
    aligns = [x for x in aligns] + [''] * len(items)
    aligns = aligns[:len(items)]
    aligns = ['r' if x == 'r' else x for x in aligns]
    # every column becomes a list of stringified rows
    items = [(x if type(x) in listtype else [x])
             for x in items]
    items = [[_to_str(y)
              for y in x]
             for x in items]
    maxHeight = max([len(x) for x in items] + [0])
    def max_width(x): return max([y.__len__()
                                  for y in x] + [0])
    widths = [max_width(x) for x in items]
    # pad every column with empty cells up to the tallest column
    items = [(x + [''] * maxHeight)[:maxHeight]
             for x in items]
    lines = []
    for i in range(maxHeight):
        line = []
        for j in range(len(items)):
            width = widths[j]
            elt = items[j][i]
            # measure BEFORE utf8str: for ColoredString cells __len__ is
            # presumably the visible width, not the encoded byte length --
            # TODO confirm against colored_string
            actualWidth = elt.__len__()
            elt = utf8str(elt)
            if actualWidth < width:
                padding = ' ' * (width - actualWidth)
                if aligns[j] == 'l':
                    elt = elt + padding
                else:
                    elt = padding + elt
            line.append(elt)
        line = sep.join(line)
        lines.append(line)
    return "\n".join(lines)
def struct_repr(data, key=None):
    '''
    Render a data to a multi-line structural(yaml-like) representation.
    Returns a list of display lines; `key` is the sort key applied to a
    dict's (name, rendered-lines) pairs.
    a = {
        1: 3,
        'x': {1: 4, 2: 5},
        'l': [1, 2, 3],
    }
    for l in struct_repr(a):
        print(l)
    Output:
        1 : 3
        l : - 1
            - 2
            - 3
        x : 1 : 4
            2 : 5
    '''
    if type(data) in listtype:
        if len(data) == 0:
            return ['[]']
        # first pass: render every element and find the widest line so all
        # entries can be left-justified to a common width
        max_width = 0
        elt_lines = []
        for elt in data:
            sublines = struct_repr(elt)
            sublines_max_width = max([len(x) for x in sublines])
            if max_width < sublines_max_width:
                max_width = sublines_max_width
            elt_lines.append(sublines)
        lines = []
        for sublines in elt_lines:
            # - subline[0]
            #   subline[1]
            #   ...
            lines.append('- ' + sublines[0].ljust(max_width))
            for l in sublines[1:]:
                lines.append('  ' + l.ljust(max_width))
        return lines
    elif type(data) == types.DictType:
        if len(data) == 0:
            return ['{}']
        # track the widest key and widest rendered value for alignment
        max_k_width = 0
        max_v_width = 0
        kvs = []
        for k, v in data.items():
            k = utf8str(k)
            sublines = struct_repr(v)
            sublines_max_width = max([len(x) for x in sublines])
            if max_k_width < len(k):
                max_k_width = len(k)
            if max_v_width < sublines_max_width:
                max_v_width = sublines_max_width
            kvs.append((k, sublines))
        kvs.sort(key=key)
        lines = []
        for k, sublines in kvs:
            # foo : sub-0
            #       sub-1
            #   b : sub-0
            #       sub-0
            lines.append(k.rjust(max_k_width) + ' : ' +
                         sublines[0].ljust(max_v_width))
            for l in sublines[1:]:
                lines.append(' '.rjust(max_k_width) +
                             '   ' + l.ljust(max_v_width))
        return lines
    else:
        # scalar: strip control characters and stringify
        data = filter_invisible_chars(data)
        return [utf8str(data)]
def _get_key_and_headers(keys, rows):
    # Derive column keys (and display headers) for format_table.
    # When `keys` is not given, infer them from the first row:
    # dict -> its sorted key names, list/tuple -> positional indexes,
    # scalar -> a single unnamed column.
    if keys is None:
        if len(rows) == 0:
            keys = []
        else:
            r0 = rows[0]
            if type(r0) == types.DictType:
                keys = r0.keys()
                keys.sort()
            elif type(r0) in listtype:
                keys = [i for i in range(len(r0))]
            else:
                keys = ['']
    # each key may be a (key, header) pair; split lookup key from header
    _keys = []
    column_headers = []
    for k in keys:
        if type(k) not in listtype:
            k = [k, k]
        _keys.append(k[0])
        column_headers.append(str(k[1]))
    return _keys, column_headers
def _get_colors(colors, col_n):
if colors is None:
colors = []
colors = colors or ([None] * col_n)
while len(colors) < col_n:
colors.extend(colors)
colors = colors[:col_n]
return colors
def format_table(rows,
                 keys=None,
                 colors=None,
                 sep=' | ',
                 row_sep=None):
    """Render `rows` (dicts, sequences or scalars) into a list of aligned
    display lines.  `keys` selects/orders the columns, `colors` colorizes
    them cyclically, and a non-None `row_sep` character draws a separator
    line between consecutive rows."""
    keys, column_headers = _get_key_and_headers(keys, rows)
    colors = _get_colors(colors, len(keys))
    # element of lns is a mulit-column line
    # lns = [
    #     # line 1
    #     [
    #         # column 1 of line 1
    #         ['name:',  # row 1 of column 1 of line 1
    #          'foo',    # row 2 of column 1 of line 1
    #         ],
    #
    #         # column 2 of line 1
    #         ['school:',
    #          'foo',
    #          'bar',
    #         ],
    #     ],
    # ]
    # headers
    lns = [
        [[a + ': ']
         for a in column_headers]
    ]
    for row in rows:
        if row_sep is not None:
            # placeholder line; its None cells become row_sep runs below
            lns.append([[None] for k in keys])
        if type(row) == types.DictType:
            ln = [struct_repr(row.get(k, ''))
                  for k in keys]
        elif type(row) in listtype:
            ln = [struct_repr(row[int(k)])
                  if len(row) > int(k) else ''
                  for k in keys]
        else:
            ln = [struct_repr(row)]
        lns.append(ln)
    # widest first-row cell per column decides that column's width
    def get_max_width(cols): return max([len(utf8str(c[0]))
                                         for c in cols] + [0])
    max_widths = [get_max_width(cols) for cols in zip(*lns)]
    rows = []
    for row in lns:
        ln = []
        for i in range(len(max_widths)):
            color = colors[i]
            w = max_widths[i]
            ln.append([ColoredString(x.ljust(w), color)
                       if x is not None else row_sep * w
                       for x in row[i]])
        rows.append(format_line(ln, sep=sep))
    return rows
def filter_invisible_chars(data):
    """Strip ASCII control characters from str/unicode values; any other
    type is passed through untouched."""
    if type(data) in (types.StringType, types.UnicodeType):
        return invisible_chars_re.sub('', data)
    return data
def _to_str(y):
    """Stringify numbers and containers for table layout; ColoredString
    (and anything else, e.g. plain strings) passes through unchanged.

    Fix: replaced the py2-only `0L` long literal (not even parseable by
    modern tooling) with the `types` module constants used everywhere else
    in this module -- identical behavior under Python 2.
    """
    if isinstance(y, ColoredString):
        pass
    elif type(y) in (types.IntType, types.LongType):
        y = str(y)
    elif type(y) in (types.ListType, types.TupleType, types.DictType):
        y = str(y)
    return y
def utf8str(s):
    """Return a utf-8 encoded byte string for unicode input, and str(s)
    for everything else."""
    is_unicode = type(s) == type(u'')
    return s.encode('utf8') if is_unicode else str(s)
def common_prefix(a, *others, **options):
    """Fold `a` with every other sequence via _common_prefix, returning
    the prefix common to all of them.  `recursive=True` (the default)
    also reduces the first differing nested elements."""
    recursive = options.get('recursive', True)
    for other in others:
        if type(a) != type(other):
            raise TypeError('a and b has different type: ' + repr((a, other)))
        a = _common_prefix(a, other, recursive)
    return a
def _common_prefix(a, b, recursive=True):
    """Return the longest common prefix of `a` and `b`, preserving a's
    type (tuple, list, or string).  With `recursive`, the first differing
    elements are themselves reduced to their common prefix when they are
    sized containers (so nested lists/tuples/strings partially match)."""
    rst = []
    for i, elt in enumerate(a):
        if i == len(b):
            break
        if type(elt) != type(b[i]):
            raise TypeError('a and b has different type: ' + repr((elt, b[i])))
        if elt == b[i]:
            rst.append(elt)
        else:
            break
    # Find common prefix of the last different element.
    #
    # string does not support nesting level reduction. It infinitely recurses
    # down.
    # And non-iterable element is skipped, such as int.
    i = len(rst)
    if recursive and i < len(a) and i < len(b) and not isinstance(a, basestring) and hasattr(a[i], '__len__'):
        last_prefix = _common_prefix(a[i], b[i])
        # discard empty tuple, list or string
        if len(last_prefix) > 0:
            rst.append(last_prefix)
    if isinstance(a, tuple):
        return tuple(rst)
    elif isinstance(a, list):
        return rst
    else:
        return ''.join(rst)
def break_line(linestr, width):
    """Word-wrap `linestr` to `width`, returning the wrapped lines as a
    list.  Words longer than `width` are kept whole on their own line;
    ColoredString input keeps a ColoredString separator so color survives
    the re-join."""
    joiner = ColoredString(' ') if isinstance(linestr, ColoredString) else ' '
    wrapped = []
    for raw_line in linestr.splitlines():
        words = raw_line.split(' ')
        current = words[0]
        for word in words[1:]:
            if len(word) + len(current) + 1 > width:
                wrapped.append(current)
                current = word
            else:
                current += joiner + word
        if current != '':
            wrapped.append(current)
    return wrapped
|
from art.attacks import SaliencyMapMethod
from tools.art.adversarial_attack import AdversarialAttack
class SaliencyMapAttack(AdversarialAttack):
    """JSMA adversarial attack: thin wrapper around ART's
    SaliencyMapMethod."""

    def __init__(self, model, theta=0.1, gamma=1.0, batch_size=16):
        """Build the underlying ART attack with the given perturbation
        amount (theta), feature fraction (gamma) and batch size."""
        super().__init__(model=model)
        self._theta = theta
        self._gamma = gamma
        self._method = SaliencyMapMethod(classifier=self.model,
                                         theta=self._theta,
                                         gamma=self._gamma,
                                         batch_size=batch_size)

    def attack_method(self, x, y=None):
        """Generate adversarial examples for `x`; `y` (optional) gives
        the target labels."""
        extra = {} if y is None else {'y': y}
        return self._method.generate(x=x, **extra)
|
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
from typing import Optional, Tuple, Union
from .alias import Point
from .bip32 import BIP32Key, BIP32KeyData
from .curve import Curve, mult, secp256k1
from .network import (
NETWORKS,
curve_from_xkeyversion,
network_from_xkeyversion,
xpubversions_from_network,
)
from .secpoint import bytes_from_point, point_from_octets
from .to_prvkey import PrvKey, prvkeyinfo_from_prvkey
from .utils import bytes_from_octets
# public key inputs:
# elliptic curve point as Union[Octets, BIP32Key, Point]
PubKey = Union[bytes, str, BIP32KeyData, Point]
# public or private key input,
# usable wherever a PubKey is logically expected
# (the extra int alternative covers raw private-key integers)
Key = Union[int, bytes, str, BIP32KeyData, Point]
def _point_from_xpub(xpub: BIP32Key, ec: Curve) -> Point:
    "Return an elliptic curve point tuple from a xpub key."
    # normalize to a validated BIP32KeyData instance
    if not isinstance(xpub, BIP32KeyData):
        xpub = BIP32KeyData.deserialize(xpub)
    else:
        xpub.assert_valid()
    # an xpub payload must be a compressed SEC key (02/03 prefix)
    if xpub.key[0] not in (2, 3):
        raise ValueError(f"Not a public key: {xpub.key.hex()}")
    # the version bytes pin the curve; it must agree with the one requested
    if ec != curve_from_xkeyversion(xpub.version):
        raise ValueError(f"ec/xpub version ({xpub.version.hex()}) mismatch")
    return point_from_octets(xpub.key, ec)
def point_from_key(key: Key, ec: Curve = secp256k1) -> Point:
    """Return a point tuple from any possible key representation.

    It supports:
    - BIP32 extended keys (bytes, string, or BIP32KeyData)
    - SEC Octets (bytes or hex-string, with 02, 03, or 04 prefix)
    - native tuple
    """
    if isinstance(key, tuple):
        return point_from_pubkey(key, ec)
    if isinstance(key, int):
        secret, _, _ = prvkeyinfo_from_prvkey(key)
        return mult(secret, ec.G, ec)
    # try the private-key interpretation first; on failure fall back to
    # treating the input as a public key
    try:
        secret, net, _ = prvkeyinfo_from_prvkey(key)
    except Exception:
        pass
    else:
        if ec != NETWORKS[net].curve:
            raise ValueError("Curve mismatch")
        return mult(secret, ec.G, ec)
    return point_from_pubkey(key, ec)
def point_from_pubkey(pubkey: PubKey, ec: Curve = secp256k1) -> Point:
    "Return an elliptic curve point tuple from a public key."
    # native tuple: accept only points on the curve, excluding infinity
    if isinstance(pubkey, tuple):
        is_valid = ec.is_on_curve(pubkey) and pubkey[1] != 0
        if not is_valid:
            raise ValueError(f"not a valid public key: {pubkey}")
        return pubkey
    if isinstance(pubkey, BIP32KeyData):
        return _point_from_xpub(pubkey, ec)
    # serialized input: first try xpub, then SEC octets
    try:
        return _point_from_xpub(pubkey, ec)
    except Exception:
        pass
    # it must be octets
    try:
        return point_from_octets(pubkey, ec)
    except Exception:
        raise ValueError(f"Not a public key: {pubkey!r}")
# not used so far, probably useless
# def point_from_prvkey(prvkey: PrvKey, network: Optional[str] = None)->Point:
# "Return an elliptic curve point tuple from a private key."
#
# q, net, compr = prvkeyinfo_from_prvkey(prvkey, network)
# ec = NETWORKS[net]['curve']
# return mult(q, ec.G, ec)
# (SEC serialization of the public key, network identifier)
PubKeyInfo = Tuple[bytes, str]
def _pubkeyinfo_from_xpub(
    xpub: BIP32Key, network: Optional[str] = None, compressed: Optional[bool] = None
) -> PubKeyInfo:
    """Return the pubkey tuple (SEC-bytes, network) from a BIP32 xpub.

    BIP32Key is always compressed and includes network information:
    here the 'network, compressed' input parameters are passed
    only to allow consistency checks.
    """
    # an explicit request for uncompressed output contradicts BIP32
    if compressed is not None and not compressed:
        raise ValueError("Uncompressed SEC / compressed BIP32 mismatch")
    if isinstance(xpub, BIP32KeyData):
        xpub.assert_valid()
    else:
        xpub = BIP32KeyData.deserialize(xpub)
    if xpub.key[0] not in (2, 3):
        m = f"Not a public key: {xpub.serialize().decode('ascii')}"
        raise ValueError(m)
    if network is None:
        # derive the network from the version bytes
        return xpub.key, network_from_xkeyversion(xpub.version)
    # a network was requested: the version bytes must belong to it
    allowed_versions = xpubversions_from_network(network)
    if xpub.version not in allowed_versions:
        m = f"Not a {network} key: {xpub.serialize().decode('ascii')}"
        raise ValueError(m)
    return xpub.key, network
def pubkeyinfo_from_key(
    key: Key, network: Optional[str] = None, compressed: Optional[bool] = None
) -> PubKeyInfo:
    "Return the pub key tuple (SEC-bytes, network) from a pub/prv key."
    if isinstance(key, tuple):
        return pubkeyinfo_from_pubkey(key, network, compressed)
    if isinstance(key, int):
        return pubkeyinfo_from_prvkey(key, network, compressed)
    # serialized input: try the public-key interpretation first
    try:
        return pubkeyinfo_from_pubkey(key, network, compressed)
    except Exception:
        pass
    # it must be a prvkey
    try:
        return pubkeyinfo_from_prvkey(key, network, compressed)
    except Exception:
        # assemble a message reflecting the constraints that were requested
        pieces = ["not a private or"]
        if compressed is not None:
            pieces.append(" compressed" if compressed else " uncompressed")
        pieces.append(" public key")
        if network is not None:
            pieces.append(f" for {network}")
        pieces.append(f": {key!r}")
        raise ValueError("".join(pieces))
def pubkeyinfo_from_pubkey(
    pubkey: PubKey, network: Optional[str] = None, compressed: Optional[bool] = None
) -> PubKeyInfo:
    "Return the pub key tuple (SEC-bytes, network) from a public key."
    # defaults: compressed SEC serialization, mainnet
    compr = True if compressed is None else compressed
    net = "mainnet" if network is None else network
    ec = NETWORKS[net].curve
    if isinstance(pubkey, tuple):
        return bytes_from_point(pubkey, ec, compr), net
    elif isinstance(pubkey, BIP32KeyData):
        return _pubkeyinfo_from_xpub(pubkey, network, compressed)
    else:
        # serialized input: try the xpub interpretation first
        try:
            return _pubkeyinfo_from_xpub(pubkey, network, compressed)
        except Exception:
            pass
        # it must be octets
        try:
            if compressed is None:
                # accept either SEC length and infer compression from it
                pubkey = bytes_from_octets(pubkey, (ec.psize + 1, 2 * ec.psize + 1))
                compr = False
                if len(pubkey) == ec.psize + 1:
                    compr = True
            else:
                # enforce the exact SEC length implied by `compressed`
                size = ec.psize + 1 if compressed else 2 * ec.psize + 1
                pubkey = bytes_from_octets(pubkey, size)
                compr = compressed
        except Exception:
            raise ValueError("Not a public key")
        # verify that it is a valid point
        Q = point_from_octets(pubkey, ec)
        return bytes_from_point(Q, ec, compr), net
def pubkeyinfo_from_prvkey(
    prvkey: PrvKey, network: Optional[str] = None, compressed: Optional[bool] = None
) -> PubKeyInfo:
    "Return the pub key tuple (SEC-bytes, network) from a private key."
    secret, net, compr = prvkeyinfo_from_prvkey(prvkey, network, compressed)
    ec = NETWORKS[net].curve
    # derive the public point and serialize it in SEC form
    pub_point = mult(secret, ec.G, ec)
    return bytes_from_point(pub_point, ec, compr), net
|
'''4. Elaborar um programa para imprimir os números de 1 (inclusive) a 10 (inclusive)
em ordem decrescente.Utilize for'''
# Fix: the printed heading claimed "crescente" (ascending) while both the
# assignment and the loop are descending.
print("Numeros de 1 a 10 em ordem decrescente")
# range(10, 0, -1) yields 10, 9, ..., 1 (the stop value 0 is exclusive)
for contador in range(10, 0, -1):
    print(contador)
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from six.moves import mock
from sasctl.core import PagedList, RestObj
from .test_pageiterator import paging
def test_len_no_paging():
    """len() and iteration must work without fetching extra pages."""
    source = [{'name': 'a'}, {'name': 'b'}, {'name': 'c'}]
    obj = RestObj(items=source, count=len(source))
    with mock.patch('sasctl.core.request') as request:
        paged = PagedList(obj)
        assert len(paged) == 3
        for idx, entry in enumerate(paged):
            assert RestObj(source[idx]) == entry
    # No request should have been made to retrieve additional data.
    request.assert_not_called()
def test_getitem_no_paging():
    """Index access must work without fetching extra pages."""
    source = [{'name': 'a'}, {'name': 'b'}, {'name': 'c'}]
    obj = RestObj(items=source, count=len(source))
    with mock.patch('sasctl.core.request') as request:
        paged = PagedList(obj)
        for idx in range(len(paged)):
            assert RestObj(source[idx]) == paged[idx]
    # No request should have been made to retrieve additional data.
    request.assert_not_called()
def test_str():
    """Check str formatting of list."""
    source_items = [{'name': 'a'}, {'name': 'b'}, {'name': 'c'},
                    {'name': 'd'}, {'name': 'e'}, {'name': 'f'}]
    start = 2
    limit = 2
    with mock.patch('sasctl.core.request') as req:
        # first page carries two items plus a "next" link for the rest
        obj = RestObj(items=source_items[:2],
                      count=len(source_items),
                      links=[{'rel': 'next',
                              'href': '/moaritems?start=%d&limit=%d' % (
                                  start, limit)}])
        def side_effect(_, link, **kwargs):
            # serve the remaining items two at a time based on the start=
            # offset embedded in the link.
            # NOTE(review): `result` stays unbound for any other link, so a
            # changed paging scheme would raise UnboundLocalError here.
            if 'start=2' in link:
                result = source_items[1:1+limit]
            elif 'start=4' in link:
                result = source_items[3:3+limit]
            return RestObj(items=result)
        req.side_effect = side_effect
        l = PagedList(obj)
        for i in range(len(source_items)):
            # Force access of each item to ensure it's downloaded
            _ = l[i]
            if i < len(source_items) - 1:
                # Ellipses should indicate unfetched results unless we're
                # at the end of the list
                assert str(l).endswith(', ... ]')
            else:
                assert not str(l).endswith(', ... ]')
def test_getitem_paging(paging):
    """Check that list can be enumerated."""
    obj, source, _ = paging
    paged = PagedList(obj)
    # length of list should equal total # of items
    assert len(paged) == len(source)
    for idx, entry in enumerate(paged):
        assert entry.name == RestObj(source[idx]).name
def test_zip_paging(paging):
    """Check that zip() works correctly with the list."""
    obj, source, _ = paging
    paged = PagedList(obj)
    # length of list should equal total # of items
    assert len(paged) == len(source)
    for expected, got in zip(source, paged):
        assert RestObj(expected).name == got.name
def test_slice_paging(paging):
    """Slicing with [i:j] should behave exactly like slicing a plain list."""
    obj, items, _ = paging
    paged = PagedList(obj)

    # Reported length reflects the server-side count, not just page one.
    assert len(paged) == len(items)

    # Pair start/stop indexes that deliberately run past both ends of the
    # list and collapse into empty sequences along the way.
    begins = range(len(paged) + 1)
    ends = range(len(paged), -1, -1)
    for begin, end in zip(begins, ends):
        expected = items[begin:end]
        fetched = paged[begin:end]
        for idx, item in enumerate(fetched):
            assert item.name == RestObj(expected[idx]).name
def test_copy(paging):
    """Copying with [:] should reproduce the full item sequence."""
    obj, items, _ = paging
    paged = PagedList(obj)

    # Reported length reflects the server-side count, not just page one.
    assert len(paged) == len(items)

    expected = items[:]
    fetched = paged[:]
    assert len(fetched) == len(paged)
    for idx, item in enumerate(fetched):
        assert item.name == RestObj(expected[idx]).name
|
"""
taskcat python module
"""
from .collector import *
from .configurator import *
from .deployer import *
from .mutator import *
from .reporter import *
from .stacker import *
from .reaper import *
from .tester import *
from .utils import *
from .validator import *
|
"""
consume_reconcile_queue
"""
|
# Module attribution metadata.
# NOTE(review): "__AdapedBy__" is a misspelling of "AdaptedBy"; it is kept
# as-is in case external code reads the attribute by this exact name.
__author__ = 'tonycastronova'
__AdapedBy__ = 'AdelAbdallah'
class Schema():
    """A named database schema that aggregates Table objects."""

    def __init__(self, name):
        self.__label = name
        self.__members = []

    def name(self):
        """Return the schema's name."""
        return self.__label

    def add_table(self, table):
        """Append *table* to this schema's table collection."""
        self.__members.append(table)

    def get_tables(self):
        """Return all tables added so far."""
        return self.__members
class Table():
    """A database table: a name, a primary key, columns and foreign keys."""

    def __init__(self, name, pk):
        self.__label = name
        self.__primary = pk
        self.__cols = []
        self.__fks = []

    def name(self):
        """Return the table name."""
        return self.__label

    def pk(self):
        """Return the primary-key identifier."""
        return self.__primary

    def add_column(self, column):
        """Append *column* to the table's column list."""
        self.__cols.append(column)

    def get_columns(self):
        """Return every column added so far."""
        return self.__cols

    def add_foreignkey(self, fk):
        """Append *fk* to the table's foreign-key list."""
        self.__fks.append(fk)

    def get_foreignkeys(self):
        """Return every foreign key added so far."""
        return self.__fks
class Column():
    """A single table column and its type/constraint attributes."""

    def __init__(self, name, datatype, primarykey=False, autoincrement=False,
                 length=None, scale=None, unsigned=False, id=None,
                 nullable=False, default=None):
        self.__colname = name
        self.__dtype = datatype
        self.__len = length
        self.__scl = scale
        # Flag arguments may arrive as bools, ints or numeric strings such
        # as "0"/"1"; int() normalizes strings before the bool conversion.
        self.__uns = bool(int(unsigned))
        self.__ident = id
        self.__autoinc = bool(int(autoincrement))
        self.__nullok = bool(int(nullable))
        self.__dflt = default
        self.__primary = bool(int(primarykey))

    def get_attributes(self):
        """Return the column's attributes as a plain dict."""
        return {
            'name': self.__colname,
            'dtype': self.__dtype,
            'length': self.__len,
            'scale': self.__scl,
            'unsigned': self.__uns,
            'id': self.__ident,
            'autoincrement': self.__autoinc,
            'nullable': self.__nullok,
            'default': self.__dflt,
            'primarykey': self.__primary,
        }
class ForeignKey():
    """A foreign-key relationship between a parent and a child column."""

    def __init__(self, name, parentSch, parentTbl, parentCol,
                 childSch, childTbl, childCol):
        # Every constructor argument is stored as a public attribute,
        # mirroring the parameters one-to-one.
        self.name = name
        self.parentSch, self.parentTbl, self.parentCol = (
            parentSch, parentTbl, parentCol)
        self.childSch, self.childTbl, self.childCol = (
            childSch, childTbl, childCol)
|
"""
Definition of the :class:`AnalysisVersion` class.
"""
from typing import Any
from django.db import models
from django_analyses.models import help_text
from django_analyses.models.managers.analysis_version import (
AnalysisVersionManager,
)
from django_analyses.models.utils import get_analysis_version_interface
from django_extensions.db.models import TimeStampedModel, TitleDescriptionModel
class AnalysisVersion(TitleDescriptionModel, TimeStampedModel):
    """
    A :class:`~django.db.models.Model` representing a single analysis version
    in the database.

    Each :class:`~django_analyses.models.analysis_version.AnalysisVersion`
    instance should be assigned an interface through the project's
    :attr:`ANALYSIS_INTERFACES` setting (for more information see
    :ref:`user_guide/analysis_integration/simplified_example:Interface
    Integration` and
    :ref:`user_guide/analysis_integration/integration_customization:Integration
    Customization`).
    """

    analysis = models.ForeignKey(
        "django_analyses.Analysis",
        on_delete=models.CASCADE,
        blank=True,
        null=True,
        related_name="version_set",
    )
    """
    The :class:`~django_analyses.models.analysis.Analysis` instance to which
    this analysis version belongs.
    """

    input_specification = models.ForeignKey(
        "django_analyses.InputSpecification",
        on_delete=models.PROTECT,
        blank=True,
        null=True,
        related_name="analysis_version_set",
    )
    """
    The
    :class:`~django_analyses.models.input.input_specification.InputSpecification`
    instance specifying the
    :class:`~django_analyses.models.input.definitions.input_definition.InputDefinition`
    subclasses associated with this analysis version.
    """

    output_specification = models.ForeignKey(
        "django_analyses.OutputSpecification",
        on_delete=models.PROTECT,
        blank=True,
        null=True,
        related_name="analysis_version_set",
    )
    """
    The
    :class:`~django_analyses.models.output.output_specification.OutputSpecification`
    instance specifying the
    :class:`~django_analyses.models.output.definitions.output_definition.OutputDefinition`
    subclasses associated with this analysis version.
    """

    #############################
    # Integration customization #
    #############################

    run_method_key = models.CharField(
        max_length=100, default="run", help_text=help_text.RUN_METHOD_KEY
    )
    """
    Custom *run* method name for the interface.

    Each analysis version is expected to have some class associated with it and
    used as an interface for running the analysis. This field determines the
    name of the method that will be called (default value is *"run"*).
    """

    fixed_run_method_kwargs = models.JSONField(
        default=dict, help_text=help_text.FIXED_KWARGS
    )
    """
    Any "fixed" keyword arguments that should always be passed to the
    interface's *run* method at execution.
    """

    nested_results_attribute = models.CharField(
        max_length=100,
        blank=True,
        null=True,
        help_text=help_text.NESTED_RESULTS_ATTRIBUTE,
    )
    """
    Analysis interfaces are expected to return a dictionary of the results. In
    case this analysis version's interface returns some object which contains
    the desired dictionary, this field allows specifying the attribute or
    method that may be used to retrieve it.

    Example
    -------
    Nipype_\'s interfaces generally return some kind of
    :class:`InterfaceResults` object with an :attr:`outputs` attribute that may
    be used to create a dictionary of the results by calling the
    :meth:`get_traitsfree` method.
    In order to integrate smoothly with Nipype's interfaces, we could simply
    specify *nested_results_attribute="outputs.get_traitsfree"* when creating
    the appropriate analysis versions.

    .. _Nipype: https://nipype.readthedocs.io/en/latest/
    """

    #####################
    # Execution Options #
    #####################

    max_parallel = models.PositiveIntegerField(
        default=4, help_text=help_text.MAX_PARALLEL
    )
    """
    Maximal number of parallel executions that may be run using Celery_. This
    attribute is used in :func:`~django_analyses.tasks.execute_node` to
    chunk an iterable of node inputs in case it is longer than this value.
    For more information see Celery's `Chunks documentation`_.

    .. _Celery:
       https://docs.celeryproject.org/
    .. _Chunks documentation:
       https://docs.celeryproject.org/en/stable/userguide/canvas.html#chunks
    """

    # Custom manager adding analysis-version-specific query helpers.
    objects = AnalysisVersionManager()

    class Meta:
        # A title identifies a version uniquely within its analysis only.
        unique_together = "analysis", "title"
        # Newest version titles first within each analysis.
        ordering = (
            "analysis",
            "-title",
        )

    def __str__(self) -> str:
        """
        Returns the string representation of the
        :class:`~django_analyses.models.analysis_version.AnalysisVersion`
        instance.

        Returns
        -------
        str
            String representation of this instance
        """
        return f"{self.analysis.title} v{self.title}"

    def get_interface(self) -> object:
        """
        Queries the project's settings to locate the instance's interface.
        For more information see
        :ref:`user_guide/analysis_integration/simplified_example:Interface
        Integration`.

        Returns
        -------
        :obj:`object`
            Interface class used to run this version of the analysis

        Raises
        ------
        NotImplementedError
            No interface could be found for this analysis
        """
        return get_analysis_version_interface(self)

    def get_interface_initialization_kwargs(self, **kwargs) -> dict:
        """
        Returns the parameters required at the interface's class
        initialization.

        Returns
        -------
        dict
            Initialization parameters as a keyword arguments dict
        """
        # Initialization kwargs are those NOT flagged as run-method inputs
        # in the input specification.
        return {
            key: value
            for key, value in kwargs.items()
            if not self.input_definitions.get(key=key).run_method_input
        }

    def get_run_method_kwargs(self, **kwargs) -> dict:
        """
        Returns the parameters required when calling the interface's
        :meth:`run` method.

        Returns
        -------
        dict
            :meth:`run` method parameters as a keyword arguments dict
        """
        # Complement of get_interface_initialization_kwargs().
        return {
            key: value
            for key, value in kwargs.items()
            if self.input_definitions.get(key=key).run_method_input
        }

    def run_interface(self, **kwargs) -> dict:
        """
        Call the interface class's :meth:`run` method with the given keyword
        arguments.

        Returns
        -------
        dict
            Dictionary of results
        """
        # Initialize the interface class
        init_kwargs = self.get_interface_initialization_kwargs(**kwargs)
        instance = self.interface(**init_kwargs)

        # Prepare run method kwargs; fixed kwargs are applied first so that
        # call-time kwargs may override them.
        run_method_kwargs = {
            **self.fixed_run_method_kwargs,
            **self.get_run_method_kwargs(**kwargs),
        }

        # Run the analysis and return the results dictionary
        run_method = getattr(instance, self.run_method_key)
        return run_method(**run_method_kwargs)

    def extract_results(self, results: Any) -> dict:
        """
        Extracts a results dictionary from an arbitrary results object in case
        the :attr:`nested_results_attribute` is not `None`.

        Parameters
        ----------
        results : Any
            Arbitrary results object

        Returns
        -------
        dict
            Results dictionary
        """
        # Walk the (possibly dotted) attribute path; if the final attribute
        # is a method rather than a dict, call it to obtain the dict.
        for nested_attribute in self.nested_results_parts:
            results = getattr(results, nested_attribute)
        return results if isinstance(results, dict) else results()

    def run(self, **kwargs) -> dict:
        """
        Runs the interface safely by validating the input according to the
        instance's
        :attr:`~django_analyses.models.analysis_version.AnalysisVersion.input_specification`
        and applying any special integration customizations (for more
        information see
        :ref:`user_guide/analysis_integration/integration_customization:Integration
        Customization`).

        Returns
        -------
        dict
            Results dictionary
        """
        self.input_specification.validate_kwargs(**kwargs)
        raw_results = self.run_interface(**kwargs)
        return self.extract_results(raw_results)

    def update_input_with_defaults(self, configuration: dict) -> dict:
        """
        Updates a configuration specified as keyword arguments with the
        instance's
        :attr:`~django_analyses.models.analysis_version.AnalysisVersion.input_specification`
        defaults.

        Parameters
        ----------
        configuration : dict
            Input configuration (excluding default values)

        Returns
        -------
        dict
            Configuration updated with default values
        """
        # Explicit configuration values take precedence over defaults.
        defaults = self.input_specification.default_configuration.copy()
        return {**defaults, **configuration}

    @property
    def nested_results_parts(self) -> list:
        """
        Splits the
        :attr:`~django_analyses.models.analysis_version.AnalysisVersion.nested_results_attribute`
        at *"."* indices in case of a deeply nested attribute.

        Returns
        -------
        list
            Listed parts of nested result dictionary location
        """
        return (
            self.nested_results_attribute.split(".")
            if self.nested_results_attribute
            else []
        )

    @property
    def input_definitions(self) -> models.QuerySet:
        """
        Returns the associated instance's
        :class:`~django_analyses.models.input.definitions.input_definition.InputDefinition`
        subclasses as defined in its
        :attr:`~django_analyses.models.analysis_version.AnalysisVersion.input_specification`.

        Returns
        -------
        :class:`~django.db.models.query.QuerySet`
            :class:`~django_analyses.models.input.definitions.input_definition.InputDefinition`
            subclasses
        """
        return self.input_specification.input_definitions

    @property
    def output_definitions(self) -> models.QuerySet:
        """
        Returns the associated instance's
        :class:`~django_analyses.models.output.definitions.output_definition.OutputDefinition`
        subclasses as defined in its
        :attr:`~django_analyses.models.analysis_version.AnalysisVersion.output_specification`.

        Returns
        -------
        :class:`~django.db.models.query.QuerySet`
            :class:`~django_analyses.models.output.definitions.output_definition.OutputDefinition`
            subclasses
        """
        return self.output_specification.output_definitions

    @property
    def interface(self) -> type:
        """
        Returns the associated interface for this instance.

        Returns
        -------
        type
            Analysis interface class
        """
        return self.get_interface()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import copy
from math import inf
import tempfile
from typing import Any, Dict, Type, cast
import unittest
import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import fairscale.optim as optim
from fairscale.utils.testing import check_same_model_params, skip_if_no_cuda, skip_if_py39_no_cuda, skip_if_single_gpu
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO # type: ignore
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")
RECIPIENT_RANK = 1
try:
from torch.distributed import broadcast_object_list # noqa
_torch_broadcast_object = True
except ImportError:
from fairscale.optim.utils import broadcast_object # noqa
_torch_broadcast_object = False
def dist_init(rank, world_size, tempfile_name, backend=BACKEND):
    """Initialize the default process group via a file:// rendezvous."""
    rendezvous = "file://" + tempfile_name
    dist.init_process_group(
        init_method=rendezvous, backend=backend, rank=rank, world_size=world_size
    )
def sync_object_ranks(something_to_sync: Any, reference_rank: int, device: torch.device) -> Any:
    """Broadcast *something_to_sync* from *reference_rank* to every rank."""
    if not _torch_broadcast_object:
        # Older torch lacks broadcast_object_list; use fairscale's helper.
        return optim.utils.broadcast_object(
            something_to_sync, src_rank=reference_rank, group=dist.group.WORLD, dist_device=device
        )

    payload = [something_to_sync]
    dist.broadcast_object_list(payload, src=reference_rank, group=dist.group.WORLD)
    return payload[0]
class TestSingleRank(unittest.TestCase):
    """
    All the following tests do not check for inter-process communication
    """

    def setUp(self):
        # Single-process "distributed" group so OSS can initialize.
        dist_init(0, 1, tempfile.mkstemp()[1])

    def tearDown(self):
        torch.distributed.destroy_process_group()

    def test_create(self):
        # OSS should wrap a plain parameter list without error.
        params = [torch.rand(1)]
        o = optim.OSS(params, lr=0.01)

    def test_state_dict(self):
        # state_dict()/load_state_dict() must round-trip SGD momentum state
        # and hyper-parameters for a single-rank OSS optimizer.
        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], lr=0.1, momentum=0.9)
        x.backward()
        o.step()
        assert x == torch.tensor([0.9], device=DEVICE)
        assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
        o.zero_grad()
        o.consolidate_state_dict()  # Sync state dict in between replicas - even if there are none
        state_dict = o.state_dict()

        # Check that the state dict is pytorch-compliant key wise
        assert "param_groups" in state_dict.keys()
        assert "state" in state_dict.keys()

        # Check that the pulled state is what we expect, and that we have all the expected keys
        assert state_dict["param_groups"][0]["lr"] == 0.1
        assert state_dict["param_groups"][0]["momentum"] == 0.9
        assert not state_dict["param_groups"][0]["nesterov"]
        assert state_dict["param_groups"][0]["weight_decay"] == 0.0
        assert state_dict["param_groups"][0]["dampening"] == 0.0

        # Check that the pulled state and the .param_groups attribute are in sync
        for k in state_dict["param_groups"][0].keys():
            if k != "params":
                assert state_dict["param_groups"][0][k] == o.param_groups[0][k]

        # Check that it's correctly loaded
        o = optim.OSS([x], lr=0.01)
        o.load_state_dict(state_dict)
        # Check that state is correct and on proper device
        assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)

        # We should now be using a lr of 0.1, both within the optimizer
        # and as exposed by the .param_groups attribute
        assert o.param_groups[0]["lr"] == 0.1
        x.backward()
        o.step()
        assert x == torch.tensor([0.71], device=DEVICE)
        assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.9], device=DEVICE)

        # Check that the exposed param_groups are on the proper device
        assert o.param_groups[0]["params"][0].device == x.device

    def test_lr_scheduler(self):
        # An OSS-wrapped SGD and a vanilla SGD must follow the same LR
        # schedule and produce identical parameter trajectories.
        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], lr=0.01)
        o2 = torch.optim.SGD([x2], lr=0.01)
        s = torch.optim.lr_scheduler.StepLR(o, 1)
        s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
        for _ in range(5):
            x.backward()
            o.zero_grad()
            o.step()
            s.step()
            x2.backward()
            o2.zero_grad()
            o2.step()
            s2.step()
            assert x == x2

    def test_step_with_kwargs(self):
        # Extra keyword arguments passed to OSS.step must be forwarded to
        # the wrapped optimizer's step().
        class SGDWithStepKWArg(torch.optim.SGD):
            def step(self, closure=None, kwarg=[]):
                super().step()
                kwarg.append(5)

        kwarg = []
        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)
        x.backward()
        o.step(0, kwarg=kwarg)
        assert kwarg == [5]
        assert x == torch.tensor([0.9], device=DEVICE)

    def test_step_with_extra_inner_key(self):
        # Keys added to param_groups by the inner optimizer must be exposed
        # through the OSS wrapper.
        class SGDWithNewKey(torch.optim.SGD):
            # Dummy optimizer which adds a new key to the param groups
            def step(self, closure=None):
                super().step()
                self.param_groups[0]["new_key"] = 0.1

        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], SGDWithNewKey, lr=0.1)
        x.backward()
        o.step()
        assert o.param_groups[0]["new_key"] == 0.1
        assert x == torch.tensor([0.9], device=DEVICE)

    def test_step_without_closure(self):
        # OSS.step must cope with inner optimizers whose step() does not
        # accept a closure argument.
        class SGDWithoutClosure(torch.optim.SGD):
            def step(self):
                return super().step()

        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], SGDWithoutClosure, lr=0.1)
        x.backward()
        o.step()
        assert x == torch.tensor([0.9], device=DEVICE)

    def test_implicit_local_state_dict(self):
        # Calling state_dict() without a prior consolidate_state_dict()
        # must raise.
        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], lr=0.1)
        with pytest.raises(RuntimeError):
            _ = o.state_dict()
def run_test_add_param_group(rank, world_size, tempfile_name):
    """Worker: OSS.add_param_group must repartition so shards stay balanced."""
    dist_init(rank, world_size, tempfile_name)

    # Test with all parameters trainable to begin with
    def all_trainable():
        params = []
        sizes = [9, 7, 5, 3]
        sizes_world = sizes * world_size
        for size in sizes_world[:-1]:
            params.append(torch.rand(size, 1))

        # Make sure that the params are trainable, enforces size-based partitioning
        for p in params:
            p.requires_grad = True

        o = optim.OSS(params, lr=0.1)
        assert len(o.param_groups) == 1
        o.add_param_group({"params": [torch.rand(3, 1)]})
        assert len(o.param_groups) == 2
        # Verify that added group is added to the correct partition making all have the same number of elements
        assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == sum(sizes)
        assert len(o.optim.param_groups) == 2

    # Test a pathological config with a first big non-trainable param
    def some_trainable():
        params = []
        for size in [100, 3, 5, 2, 6, 4]:
            params.append(torch.rand(size, 1))

        # Make sure that the params are trainable, enforces size-based partitioning
        for p in params[1:]:
            p.requires_grad = True

        o = optim.OSS(params, lr=0.1)
        assert len(o.param_groups) == 1
        o.add_param_group({"params": [torch.rand(3, 1)]})
        assert len(o.param_groups) == 2
        assert len(o.optim.param_groups) == 2

    all_trainable()
    some_trainable()
    dist.destroy_process_group()
def test_add_param_group():
    """End-to-end check of OSS.add_param_group across spawned workers."""
    n_procs = 4
    if torch.cuda.is_available():
        # Never request more workers than there are GPUs.
        n_procs = min(n_procs, torch.cuda.device_count())
    mp.spawn(run_test_add_param_group, args=(n_procs, tempfile.mkstemp()[1]), nprocs=n_procs, join=True)
def run_test_zero_grad(rank, world_size, tempfile_name):
    """Worker: OSS.zero_grad must clear every parameter gradient."""
    dist_init(rank, world_size, tempfile_name)
    x = torch.rand(1)
    m = torch.nn.Linear(1, 1)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    # Gradients exist after backward...
    assert m.weight.grad
    assert m.bias.grad
    o.zero_grad()
    # ...and are gone after zero_grad.
    assert not m.weight.grad
    assert not m.bias.grad
    dist.destroy_process_group()
def test_zero_grad():
    """Spawn workers and verify OSS.zero_grad end to end."""
    n_procs = 2
    if torch.cuda.is_available():
        # Never request more workers than there are GPUs.
        n_procs = min(n_procs, torch.cuda.device_count())
    rendezvous_file = tempfile.mkstemp()[1]
    mp.spawn(run_test_zero_grad, args=(n_procs, rendezvous_file), nprocs=n_procs, join=True)
def run_test_catch_empty_shardd(rank, world_size, tempfile_name):
    """Worker: OSS must raise an AssertionError when a rank gets an empty shard.

    NOTE(review): the trailing "shardd" in the name is a typo, but the
    function is referenced by test_empty_shard, so it is kept for
    compatibility.
    """
    dist_init(rank, world_size, tempfile_name, backend="gloo")
    # A 1x1 Linear has fewer params than ranks, so some shard must be empty.
    m = torch.nn.Linear(1, 1)
    with pytest.raises(AssertionError):
        _ = optim.OSS(m.parameters(), lr=0.1)
    dist.destroy_process_group()
def test_empty_shard():
    """More ranks than parameters: every worker must trip the shard assert."""
    n_procs = 4
    mp.spawn(run_test_catch_empty_shardd, args=(n_procs, tempfile.mkstemp()[1]), nprocs=n_procs, join=True)
def run_test_step(rank, world_size, tempfile_name):
    """Worker: a manual gradient all-reduce followed by OSS.step must match SGD."""
    dist_init(rank, world_size, tempfile_name, backend="gloo")
    x = torch.tensor([float(rank + 1)], device=rank)
    m = torch.nn.Linear(1, 1)
    # Fixed initial weights so the post-step values are deterministic.
    m.weight.data = torch.tensor([[1.0]])
    m.bias.data = torch.tensor([2.0])
    m.to(rank)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    for p in m.parameters():
        # Average gradients across ranks by hand (no DDP involved here).
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data /= world_size
    o.step()
    assert m.weight == torch.tensor([[0.75]], device=rank)
    assert m.bias == torch.tensor([1.85], device=rank)
    dist.destroy_process_group()
@skip_if_single_gpu
def test_step():
    """Two-GPU end-to-end check of OSS.step."""
    n_procs = 2
    rendezvous_file = tempfile.mkstemp()[1]
    mp.spawn(run_test_step, args=(n_procs, rendezvous_file), nprocs=n_procs, join=True)
def run_test_step_with_closure(rank, world_size, tempfile_name, optimizer=None):
    """Worker: OSS.step(closure=...) should return the loss and update params.

    NOTE(review): the ``optimizer`` parameter is unused here — confirm whether
    a parametrized variant was intended before removing it.
    """
    dist_init(rank, world_size, tempfile_name)

    x_val = rank + 1
    weight = 1.0
    bias = 2.0
    error = 1.0
    # Target is offset by a known error so the L1 loss is exactly `error`.
    target = torch.tensor([x_val * weight + bias + error], device=rank)
    loss_fn = torch.nn.L1Loss()

    x = torch.tensor([float(x_val)], device=rank)
    m = torch.nn.Linear(1, 1)
    m.weight.data = torch.tensor([[weight]])
    m.bias.data = torch.tensor([bias])
    m.to(rank)

    o = optim.OSS(m.parameters(), lr=0.1)

    y = m(x)
    y.backward(x)
    for p in m.parameters():
        # Average gradients across ranks by hand (no DDP involved here).
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data /= world_size

    def closure():
        o.zero_grad()
        output = m(x)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    loss = o.step(closure=closure)

    assert loss == torch.tensor(error, device=rank)
    assert m.weight == torch.tensor([[1.1]], device=rank)
    assert m.bias == torch.tensor([2.1], device=rank)
    dist.destroy_process_group()
@skip_if_no_cuda
def test_step_with_closure():
    """End-to-end check of OSS.step when driven through a closure."""
    n_procs = min(2, torch.cuda.device_count())
    rendezvous_file = tempfile.mkstemp()[1]
    mp.spawn(run_test_step_with_closure, args=(n_procs, rendezvous_file), nprocs=n_procs, join=True)
def run_test_sharding(rank, world_size, tempfile_name):
    """Worker: each rank's shard should hold an equal share of the elements."""
    dist_init(rank, world_size, tempfile_name)
    params = []
    sizes = [9, 7, 5, 3]
    sizes_world = sizes * world_size
    for size in sizes_world:
        params.append(torch.rand(size, 1))

    # Make sure that the params are trainable, enforces size-based partitioning
    for p in params:
        p.requires_grad = True

    o = optim.OSS(params, lr=0.1)
    # With world_size copies of `sizes`, a balanced partition gives this
    # rank exactly sum(sizes) elements.
    assert sum([x.numel() for x in o.optim.param_groups[0]["params"]]) == sum(sizes)
    dist.destroy_process_group()
def test_sharding():
    """Verify the size-based parameter partitioning across ranks."""
    n_procs = 4
    if torch.cuda.is_available():
        n_procs = min(n_procs, torch.cuda.device_count())
    _, rendezvous_file = tempfile.mkstemp()
    mp.spawn(run_test_sharding, args=(n_procs, rendezvous_file), nprocs=n_procs, join=True)
def run_test_collect_shards(rank, world_size, reference_rank, tempfile_name):
    """Worker: consolidate the sharded state on one rank, broadcast, re-load."""
    dist_init(rank, world_size, tempfile_name)
    device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE

    # Run a dummy step so that the optimizer state dict exists
    batch, input_width, hidden, target_width = 3, 3, 3, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)

    model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
    model.to(device)

    loss_fn = torch.nn.L1Loss()
    loss_fn.to(device)

    # With SGD, Momentum is required to get a state to shard
    optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)

    def closure():
        optimizer.zero_grad()
        output = model(inputs)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    _ = optimizer.step(closure=closure)

    # Update the optimizer state on the reference rank
    optimizer.consolidate_state_dict(recipient_rank=reference_rank)

    # Fetch the state on the reference rank
    # - check that it has the correct size
    # - load it again
    if rank == reference_rank:
        optimizer_state_dict = optimizer.state_dict()
        assert len(optimizer_state_dict["state"]) == len(list(model.parameters()))
    else:
        optimizer_state_dict = {}

    # distribute to the other ranks
    optimizer_state_dict = sync_object_ranks(optimizer_state_dict, reference_rank, device)

    # Load the optimizer state dict
    optimizer.load_state_dict(optimizer_state_dict)
    dist.destroy_process_group()
def test_collect_shards():
    """Consolidate, fetch and re-load the sharded optimizer state."""
    n_procs = 3
    rendezvous_file = tempfile.mkstemp()[1]
    if torch.cuda.is_available():
        n_procs = min(n_procs, torch.cuda.device_count())
    reference_rank = 0
    mp.spawn(
        run_test_collect_shards,
        args=(n_procs, reference_rank, rendezvous_file),
        nprocs=n_procs,
        join=True,
    )
def run_test_reproducibility(rank, world_size, reference_rank, tempfile_name):
    """Worker: saving then restoring the OSS state dict must replay the exact
    same optimization trajectory.

    Fix: the consolidated state dict is now actually broadcast from
    ``reference_rank`` to every rank (mirroring ``run_test_collect_shards``);
    previously the non-reference ranks attempted to load an empty dict
    despite the comment promising a broadcast.
    """
    dist_init(rank, world_size, tempfile_name)
    device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE

    # Run a dummy step so that the optimizer state dict exists
    batch, input_width, hidden, target_width = 3, 3, 3, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)

    model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
    model.to(device)

    loss_fn = torch.nn.L1Loss()
    loss_fn.to(device)

    # RMSprop keeps per-parameter running averages, giving real state to restore.
    optimizer = optim.OSS(model.parameters(), optim=torch.optim.RMSprop, lr=0.1)

    def closure():
        optimizer.zero_grad()
        output = model(inputs)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    _ = optimizer.step(closure=closure)

    # Update the optimizer state on the reference rank
    optimizer.consolidate_state_dict(recipient_rank=reference_rank)

    # Fetch the state on the reference rank, broadcast to the other ones
    if rank == reference_rank:
        optimizer_state_dict = optimizer.state_dict()
    else:
        optimizer_state_dict = {}
    optimizer_state_dict = sync_object_ranks(optimizer_state_dict, reference_rank, device)

    # Run two steps, log the loss
    _ = optimizer.step(closure=closure)
    reference_loss = optimizer.step(closure=closure)

    # Load the optimizer state dict, rewind the state two steps back
    optimizer.load_state_dict(optimizer_state_dict)

    # Run two new steps, log the loss again and check that we get the same
    _ = optimizer.step(closure=closure)
    test_loss = optimizer.step(closure=closure)
    assert torch.allclose(reference_loss, test_loss)

    dist.destroy_process_group()
def test_reproducibility():
    """Check that saving/restoring the OSS state reproduces the same losses.

    Fix: spawn ``run_test_reproducibility`` — previously this mistakenly
    re-ran ``run_test_collect_shards``, so the reproducibility worker was
    never executed.
    """
    world_size = 2
    temp_file_name = tempfile.mkstemp()[1]

    if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
        # Bail out if not enough devices
        return

    reference_rank = 0
    mp.spawn(
        run_test_reproducibility, args=(world_size, reference_rank, temp_file_name), nprocs=world_size, join=True,
    )
def run_test_multiple_groups(rank, world_size, tempfile_name):
    """Worker: OSS on a sub-group must keep models in sync on member ranks."""
    # Only work with the even ranks, to check that the global_rank indexing is properly used
    dist_init(rank=rank, world_size=world_size, tempfile_name=tempfile_name, backend="gloo")
    sub_group_ranks = [0, 2, 4]
    process_group = torch.distributed.new_group(ranks=sub_group_ranks, backend="gloo")

    # Make sure that all the ranks get different training data
    # So that the sync check in between their models is meaningful
    torch.manual_seed(rank)
    np.random.seed(rank)

    # Standard deep learning setup
    device = "cpu"
    epochs, batch, input_width, hidden, target_width = 5, 3, 20, 10, 5
    loss_fn = torch.nn.L1Loss().to(device)

    def check(optimizer):
        # Just run a couple of epochs, check that the model is properly updated
        for _ in range(epochs):
            target = torch.rand((batch, target_width), device=device)
            inputs = torch.rand((batch, input_width), device=device)

            def closure():
                optimizer.zero_grad()
                output = model(inputs)
                loss = loss_fn(output, target)
                loss /= world_size
                loss.backward()
                dist.all_reduce(loss, group=process_group)  # Not strictly needed for the test below

                return loss

            _ = optimizer.step(closure=closure)

            # Check that all the params are the same on all ranks
            for pg in optimizer.param_groups:
                for p in pg["params"]:
                    receptacle = [p.clone() for _ in sub_group_ranks] if rank == 0 else []
                    dist.gather(p, receptacle, dst=0, group=process_group)
                    if rank == 0:
                        for sync_p in receptacle[1:]:
                            assert torch.all(
                                torch.eq(receptacle[0], sync_p)
                            ), "Models differ in between ranks {} - {}".format(
                                torch.norm(receptacle[0]), torch.norm(sync_p)
                            )

    if rank in sub_group_ranks:
        # Model fitting in the broadcast bucket
        model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
            device
        )

        # With SGD, Momentum is required to get a state to shard
        optimizer = optim.OSS(
            model.parameters(), lr=0.1, momentum=0.99, group=process_group, broadcast_buffer_size=2 ** 20
        )
        check(optimizer)

        # Model not-fitting in the broadcast bucket
        model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
            device
        )

        # With SGD, Momentum is required to get a state to shard
        optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99, group=process_group, broadcast_buffer_size=0)
        check(optimizer)

    dist.destroy_process_group(process_group)
@skip_if_py39_no_cuda
def test_multiple_groups():
    """Exercise OSS on a sub-group spanning only the even ranks."""
    n_procs = 6
    rendezvous_file = tempfile.mkstemp()[1]
    mp.spawn(run_test_multiple_groups, args=(n_procs, rendezvous_file), nprocs=n_procs, join=True)
def run_gradient_clipping(rank, world_size, tempfile_name):
    """Worker: OSS.clip_grad_norm must agree with torch's clip_grad_norm_."""
    dist_init(rank, world_size, tempfile_name, backend="gloo")
    device = torch.device(rank)
    torch.manual_seed(rank)  # make sure that the different rank get different data

    # Run a dummy step so that the optimizer state dict exists
    batch, input_width, hidden, target_width = 3, 20, 10, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)

    NORMS = [1.0, 2.0, 1, 2, inf]
    CLIP_NORM = 0.3

    def check(norm):
        model_oss = torch.nn.Sequential(
            torch.nn.Linear(input_width, hidden),
            torch.nn.Linear(hidden, hidden),
            torch.nn.Linear(hidden, target_width),
        ).to(device)
        model = copy.deepcopy(model_oss)

        # For this test the gradients are (all) reduced in the same way in between the torch reference and fairscale.
        # Normally OSS would use ShardedDDP and only reduce to the proper rank, but this does not change the
        # gradient norm computation from OSS and adds a dependency.
        # to keep the comparison apples-to-apples DDP is used in both cases
        model_oss = DDP(module=model_oss, device_ids=[rank],)
        sharded_optimizer = optim.OSS(model_oss.parameters(), lr=0.1, momentum=0.99)

        model = DDP(model, device_ids=[rank],)

        loss_fn = torch.nn.L1Loss()
        loss_fn.to(device)

        model.zero_grad()
        model_oss.zero_grad()

        outputs = model(inputs)
        outputs_oss = model_oss(inputs)

        loss = loss_fn(outputs, target)
        loss.backward()

        loss_oss = loss_fn(outputs_oss, target)
        loss_oss.backward()
        torch.testing.assert_allclose(loss_oss, loss)

        # Check the equivalence with the non-sharded optim
        oss_total_norm = sharded_optimizer.clip_grad_norm(CLIP_NORM, norm_type=norm)
        total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_NORM, norm_type=norm)
        assert torch.allclose(oss_total_norm, total_norm), "torch and fairscale should return the same grad norm"

        # Check that the params have indeed been clipped
        for params in sharded_optimizer.per_device_params.values():
            for param in filter(lambda x: x.grad is not None, params[rank]):
                assert torch.norm(param.grad, p=norm) < CLIP_NORM, f"param grad norm above clip : {param.grad}"

    for norm in NORMS:
        print(f"Checking norm {norm}")
        check(norm)

        # Check twice, catch an hypothetic iterator dumb mistake
        check(norm)

    dist.destroy_process_group()
@skip_if_no_cuda
def test_gradient_clipping():
    """Compare OSS.clip_grad_norm against torch's clip_grad_norm_ end to end.

    Fix: removed the unused ``reference_rank`` local (the worker does not
    take a reference rank argument).
    """
    world_size = 3
    temp_file_name = tempfile.mkstemp()[1]
    if torch.cuda.is_available():
        # Never request more workers than there are GPUs.
        world_size = min(world_size, torch.cuda.device_count())

    mp.spawn(
        run_gradient_clipping, args=(world_size, temp_file_name), nprocs=world_size, join=True,
    )
def run_state_dict_distributed(rank, world_size, tempfile_name):
    """Worker: check that an OSS optimizer whose state is repeatedly consolidated,
    serialized and reloaded (track 2) stays in lockstep with an untouched OSS
    optimizer (track 1).

    Fix: the inner helper `run_grad_step` built the forward pass but never
    computed the loss, never called `backward()` and never called
    `optimizer.step()` (its `optimizer` argument was unused) — so no parameters
    ever moved and every "diverged after stepping" assertion passed vacuously.
    The loss/backward/step sequence is restored.
    """
    dist_init(rank, world_size, tempfile_name, backend="gloo")
    device = torch.device(rank)
    torch.manual_seed(rank)  # make sure that the different rank get different data

    # Setup two problems in parallel, we'll make sure that the second track (with save/load) follows the first one(untouched)
    # We split the model in two to test the multiple param groups support
    batch, input_width, hidden, target_width = 3, 20, 10, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)

    model_oss1 = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, hidden)).to(device)
    head_oss1 = torch.nn.Linear(hidden, target_width).to(device)

    model_oss2 = copy.deepcopy(model_oss1)
    head_oss2 = copy.deepcopy(head_oss1)

    # For this test the gradients are (all) reduced in the same way in between the torch reference and fairscale.
    # Normally OSS would use ShardedDDP and only reduce to the proper rank, but this does not change the
    # gradient norm computation from OSS and adds a dependency.
    # to keep the comparison apples-to-apples DDP is used in both cases
    model_oss1 = DDP(module=model_oss1, device_ids=[rank],)
    sharded_optimizer1 = optim.OSS(model_oss1.parameters(), lr=0.1, momentum=0.99)
    # The head is a second param group, to exercise multi-group state handling.
    sharded_optimizer1.add_param_group({"params": head_oss1.parameters()})

    model_oss2 = DDP(module=model_oss2, device_ids=[rank],)
    sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=0.1, momentum=0.99)
    sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})

    loss_fn = torch.nn.L1Loss().to(device)

    def run_grad_step(model, head, optimizer):
        # One full optimization step: forward, loss, backward, update.
        model.zero_grad()
        outputs = head(model(inputs))
        loss = loss_fn(outputs, target)
        loss.backward()
        optimizer.step()

    # pull the current state, broadcast it to all ranks
    sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)  # all ranks
    state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
    state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)

    # re-create a new optimizer from scratch with absurd values, load the previous state
    sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=1e6, momentum=0.0001)
    sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
    sharded_optimizer2.load_state_dict(state_dict2)
    check_same_model_params(
        model_oss1, model_oss2, "parameters of the two identical models have diverged (before any steps)"
    )

    # now take a step and check that parameters are equal
    run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
    run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
    check_same_model_params(
        model_oss1, model_oss2, "parameters of the two identical models have diverged (after stepping)"
    )

    # save the state dict for one model only, then distribute to the other ranks
    sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)  # all ranks
    state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
    state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)

    # Check that the pulled state and the .param_groups attribute are in sync
    for replica in range(len(state_dict2["param_groups"])):
        for k in state_dict2["param_groups"][replica].keys():
            if k != "params":
                assert state_dict2["param_groups"][replica][k] == sharded_optimizer2.param_groups[0][k]

    # take a step
    run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
    run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
    check_same_model_params(
        model_oss1, model_oss2, "parameters of the two identical models have diverged (after consolidating)"
    )

    # save again for one rank, then distribute to the others
    sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)  # all ranks
    state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
    state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)

    # reload the state_dict
    sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=0.1, momentum=0.99)
    sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
    sharded_optimizer2.load_state_dict(state_dict2)

    # take a step
    run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
    run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
    check_same_model_params(
        model_oss1, model_oss2, "parameters of the two identical models have diverged (after reloading)"
    )

    dist.destroy_process_group()
@skip_if_no_cuda
def test_state_dict_distributed():
    """Entry point: spawn one process per rank and run the distributed
    state-dict save/load parity check (`run_state_dict_distributed`)."""
    world_size = 2
    temp_file_name = tempfile.mkstemp()[1]

    if torch.cuda.is_available():
        # NOTE(review): this uses max() (grow to all visible GPUs, minimum 2)
        # while test_gradient_clipping above uses min() (cap at the preset
        # size) — confirm the asymmetry is intentional.
        world_size = max(world_size, torch.cuda.device_count())

    mp.spawn(
        run_state_dict_distributed, args=(world_size, temp_file_name), nprocs=world_size, join=True,
    )
def run_ddp_parity(rank, world_size, backend, temp_file_name):
    """Worker: verify that OSS-wrapped training matches plain PyTorch optimizer
    training step-for-step when both models are wrapped in DDP, including after
    cross-loading each other's (compatible) optimizer state dicts.

    :param rank: this process's rank (also used as CUDA device index and seed)
    :param world_size: number of participating processes
    :param backend: torch.distributed backend (NCCL from the test entry point)
    :param temp_file_name: rendezvous file for init_process_group
    """
    url = "file://" + temp_file_name
    dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)
    device = torch.device("cuda")
    torch.cuda.set_device(rank)
    # Per-rank seeds: each rank gets different data/initial randomness on purpose.
    torch.manual_seed(rank)
    np.random.seed(rank)
    hidden = 5
    in_channels = 3
    out_channels = 3
    batch = 64

    def check_optimizer_equivalence(optimizer: Type[torch.optim.Optimizer], change_train_graph: bool = False):
        # Any model works. Add one different buffer per rank
        trunk = torch.nn.Sequential(
            torch.nn.Linear(in_channels, hidden), torch.nn.Linear(hidden, hidden), torch.nn.Linear(hidden, hidden)
        )
        trunk.register_buffer("test_buffer", torch.ones((1)) * rank)
        trunk.to(device)

        head = torch.nn.Linear(hidden, out_channels).to(device)

        # Define a model to be trained by OSS
        oss_module = torch.nn.Sequential(trunk, head)

        # Two param groups with different LRs, to exercise multi-group support.
        oss_trainable_params = [
            {"params": trunk.parameters(), "lr": 1e-5},
            {"params": head.parameters(), "lr": 1e-4},
        ]

        optimizer_settings: Dict[Any, Any] = {}
        if isinstance(optimizer, torch.optim.SGD):
            optimizer_settings["momentum"] = 0.9

        sharded_optimizer = optim.OSS(
            params=oss_trainable_params,
            optim=optimizer,
            group=None,
            broadcast_buffer_size=2 ** 10,
            **optimizer_settings,
        )

        oss_ddp_model = DDP(module=oss_module, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)

        # Define a model to be trained by normal pytorch + DDP
        ddp_trunk = copy.deepcopy(trunk)
        ddp_head = copy.deepcopy(head)
        ddp_module = torch.nn.Sequential(ddp_trunk, ddp_head)

        ddp_trainable_params = [
            {"params": ddp_trunk.parameters(), "lr": 1e-5},
            {"params": ddp_head.parameters(), "lr": 1e-4},
        ]
        ddp_optimizer = optimizer(ddp_trainable_params, **optimizer_settings)  # type: ignore
        ddp_model = DDP(module=ddp_module, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)

        def check_step():
            # One closure-driven optimizer step on each side; losses and
            # resulting parameters must agree.
            input_tensor = torch.rand((batch, in_channels)).to(device)

            def closure_ddp(input_tensor=input_tensor):
                ddp_optimizer.zero_grad()
                ddp_loss = ddp_model(input_tensor).abs().sum()
                ddp_loss.backward()
                return ddp_loss

            def closure_sharded(input_tensor=input_tensor):
                sharded_optimizer.zero_grad()
                sharded_loss = oss_ddp_model(input_tensor).abs().sum()
                sharded_loss.backward()
                return sharded_loss

            loss_ddp = cast(torch.Tensor, ddp_optimizer.step(closure=closure_ddp))
            loss_sharded_optim = cast(torch.Tensor, sharded_optimizer.step(closure=closure_sharded))

            assert torch.allclose(
                loss_ddp, loss_sharded_optim, rtol=1e-3
            ), f"Losses differ in between Pytorch optim and OSS\n {loss_ddp.item()} - {loss_sharded_optim.item()} - world size {world_size}"

            check_same_model_params(oss_ddp_model, ddp_model)

        # The model should be synchronized in between the ranks at construction time, check that
        check_same_model_params(oss_ddp_model, ddp_model)

        # The models should stay the same in between ddp and sharded optimizer
        for i in range(5):
            check_step()

            # Check that altering the trainable parameters does not cause DDP and OSS to diverge
            if change_train_graph:
                # Flip the first parameter from trainable to non-trainable and vice-versa
                next(ddp_module.parameters()).requires_grad = not next(ddp_module.parameters()).requires_grad
                next(oss_module.parameters()).requires_grad = not next(oss_module.parameters()).requires_grad
                # sharded_optimizer.refresh_trainable()

        # Check that the checkpoints are compatible
        # - get states
        ddp_state_dict = ddp_optimizer.state_dict()
        sharded_optimizer.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)
        sharded_optim_state_dict = sharded_optimizer.state_dict() if rank == RECIPIENT_RANK else {}
        sharded_optim_state_dict = sync_object_ranks(sharded_optim_state_dict, RECIPIENT_RANK, device)

        # - cross load the states
        # run one step and check that the models are still the same
        ddp_state_dict_ref = copy.deepcopy(ddp_state_dict)  # OSS will remove some states
        ddp_optimizer.load_state_dict(sharded_optim_state_dict)  # mixup on purpose !
        sharded_optimizer.load_state_dict(ddp_state_dict)
        check_step()

        # - self load, rewind, check no problem
        # run one step and check that the models are still the same
        ddp_optimizer.load_state_dict(ddp_state_dict_ref)
        sharded_optimizer.load_state_dict(sharded_optim_state_dict)
        check_step()

    for opt in [torch.optim.Adam, torch.optim.SGD]:
        check_optimizer_equivalence(opt, change_train_graph=False)
        check_optimizer_equivalence(opt, change_train_graph=True)

    dist.destroy_process_group()
@skip_if_no_cuda
@skip_if_single_gpu
def test_ddp_parity():
    """Run the OSS vs. vanilla-optimizer DDP parity check on all visible GPUs over NCCL."""
    rendezvous_file = tempfile.mkstemp()[1]
    n_gpus = torch.cuda.device_count()
    nccl = dist.Backend.NCCL
    mp.spawn(run_ddp_parity, args=(n_gpus, nccl, rendezvous_file), nprocs=n_gpus, join=True)
|
from app.a_star import AStar
from app.common import get_directions
from app.common import check_if_path_in_between_walls
from app.common import add_large_opponent_move_walls
from app.common import remove_large_opponent_move_walls
from app.common import DEBUG_LOGS
def consumption_choices(data, aStar, walls, protectable_area):
    """Decide whether our snake should pursue food, and which way to go.

    Policy (per the original intent): skip eating only when we are more than
    4 segments longer than EVERY opponent AND we are not hungry (health >= 30).

    Fix: the old per-opponent condition `and data['you']['health'] >= 30`
    inverted the "unless hungry" rule — when health < 30 the flag was never
    cleared, so a hungry snake was told NOT to eat. Hunger is now checked
    independently of the size comparison.

    :param data: Battlesnake move-request payload (board, snakes, you, ...)
    :param aStar: pathfinder, forwarded to locate_food
    :param walls: wall set, forwarded to locate_food
    :param protectable_area: set of (x, y) cells considered safe to path into
    :return: (directions, food_cell) for the chosen food, or (None, None)
    """
    me = data['you']
    my_length = len(me['body'])
    hungry = me['health'] < 30

    # Are we more than 4 segments longer than every opponent?
    four_larger_than_all = True
    for snake in data['board']['snakes']:
        if snake['id'] == me['id']:
            continue  # skip self
        if my_length <= len(snake['body']) + 4:
            four_larger_than_all = False
            break

    # If comfortably larger than everyone and not hungry, don't bother eating.
    if four_larger_than_all and not hungry:
        if DEBUG_LOGS:
            print("Snake 4 larger than opponents, don't try to eat")
        return None, None

    nearest_food_directions, nearest_food = locate_food(
        me['body'][0]['x'], me['body'][0]['y'], data, aStar, walls, protectable_area)
    if nearest_food_directions is not None:
        # return directions of nearest food
        if DEBUG_LOGS:
            print("Direction of closest food: ", nearest_food_directions)
        return nearest_food_directions, nearest_food
    return None, None
def locate_food(x, y, data, aStar, walls, protectable_area):
    """Find the reachable food cell with the shortest safe A* path from (x, y).

    A food cell is considered only if it lies inside `protectable_area` and is
    not on a hazard. Paths squeezed between walls are rejected, and (when
    health >= 50) food that a LARGER opponent can reach at least as fast is
    skipped.

    :param x: our head's x coordinate
    :param y: our head's y coordinate
    :param data: Battlesnake move-request payload
    :param aStar: pathfinder; NOTE this function mutates its grid state
                  (reset/add/remove wall calls) between solves
    :param walls: wall set shared with the pathfinding helpers
    :param protectable_area: set of (x, y) cells we can defend
    :return: (directions, food_cell) for the best food, else (None, None)
    """
    food = []
    for i in range(len(data['board']['food'])):
        if (((data['board']['food'][i]['x'], data['board']['food'][i]['y']) in protectable_area) and
            (data['board']['food'][i] not in data['board']['hazards'])):
            food.append((data['board']['food'][i]['x'], data['board']['food'][i]['y']))
    if (len(food) == 0):
        return None, None

    shortest_path = None
    directions = []
    closest_food = None

    # for each food, get path, use shortest path
    for i in range(len(food)):
        # Temporarily treat larger opponents' possible moves as walls while
        # the grid is (re)built for this goal, then restore them.
        add_large_opponent_move_walls(data, aStar, walls)
        # set goal to food
        aStar.reset_grid_and_start((x, y), (food[i][0], food[i][1]))
        remove_large_opponent_move_walls(data, aStar, walls)
        # find path, returns list of x,y tuple, starting at head, returns None if no path
        path = aStar.solve()

        # check if path goes through single lane, if so mark as bad and None
        if (path != None):
            single_lane = check_if_path_in_between_walls(data, aStar, path, walls)
            if (single_lane):
                # print("Eat Path is between walls, ignore it: " + str(path))
                path = None
            # if not single lane and path exists, see if opposing snake that is larger is closer, if so, don't go for food
            #"""
            else:
                # if health high enough, go for food away from other snakes
                if (data['you']['health'] >= 50):
                    for j in range(len(data['board']['snakes'])):
                        if (data['board']['snakes'][j]['id'] == data['you']['id']):
                            continue  # skip self
                        if (len(data['you']['body']) >= len(data['board']['snakes'][j]['body'])):
                            continue  # if same or larger size than opponent, ignore check
                        p1_x = data['board']['snakes'][j]['body'][0]['x']
                        p1_y = data['board']['snakes'][j]['body'][0]['y']
                        # goal is current location
                        if ((p1_x, p1_y) == (food[i][0], food[i][1])):
                            continue
                        # remove head from unreachable cells to allow for aStar to path from opponent head to location
                        aStar.reset_grid_and_remove_wall((p1_x, p1_y), (food[i][0], food[i][1]), (p1_x, p1_y))
                        opponent_path = aStar.solve()
                        aStar.add_wall((p1_x, p1_y))
                        # if (DEBUG_LOGS):
                            # print("Eat own path: " + str(path))
                            # print("Eat opponent path: " + str(opponent_path))
                        if (opponent_path != None and (len(path) >= len(opponent_path))):
                            # The larger opponent reaches this food at least as fast: abandon it.
                            if (DEBUG_LOGS):
                                print("Can't eat food: " + str((food[i][0], food[i][1])) + " " + str(len(path)) + " " + str(len(opponent_path)))
                            path = None
                            break
        #"""
        # if path is good and is shorter than other food paths, choose
        if ((path != None) and ((shortest_path == None) or (len(path) < len(shortest_path)))):
            # path[1] is the first cell after our head — derive the move from it.
            directions = get_directions(x, y, path[1][0], path[1][1])
            shortest_path = path
            closest_food = food[i]

    if (DEBUG_LOGS):
        print("Food Path Chosen: " + str(shortest_path))
    if (shortest_path != None):
        return directions, closest_food
    return None, None
# Copyright (c) 2019, Danish Technological Institute.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# -*- coding: utf-8 -*-
""" Command to change directory into Tracker projects
"""
import logging
import shutil
import click
from tracker import tracker_file
from tracker.utils import cli, click_utils, projects
log = logging.getLogger(__name__)
def get_project_names(ctx, args, incomplete):
    """Shell-completion callback: project names containing *incomplete*."""
    all_names = projects.get_project_names()
    return [name for name in all_names if incomplete in name]
@click.command("rm")
@click.argument(u'project_name', type=click.STRING,
                autocompletion=get_project_names)
@click_utils.no_prompt_option
@click.pass_context
@click_utils.use_args
def rm(ctx, args):
    """Remove any project created by Tracker specified by its name.
    Prompts user for deletion of files on disk and deletes the
    project under the `projects` key in the Tracker home configuration
    file (default placed: ~/.tracker/)
    """
    # Delete the on-disk files first, either unconditionally (--yes)
    # or after an interactive confirmation.
    confirmed = args.yes or _confirm_rm(args)
    if confirmed:
        _rm_files(args)

    # The project entry is always dropped from tracker.yaml, even when
    # the user kept the files on disk.
    tracker_file.TrackerFile().remove_project(args.project_name)
def _rm_files(args):
    """Delete the named project's directory tree from disk."""
    target_dir = projects.get_project_dir_by_name(args.project_name)
    shutil.rmtree(target_dir)
def _confirm_rm(args):
    """Ask the user whether the project's files should be deleted from disk."""
    question = (
        "You are about to delete {}. "
        "Do you want to delete all project files on the disk?".format(args.project_name)
    )
    return cli.confirm(question, default=False)
|
from NarrativeService.ServiceUtils import ServiceUtils
class ReportFetcher(object):
    """Locates KBaseReport objects associated with a workspace object,
    following the copy chain upstream when the object itself has no report.

    Fix: `build_output` used a mutable default argument (`report_upas=[]`) —
    the same list object is shared across all calls, so a caller mutating
    `result["report_upas"]` would corrupt later results. Replaced with the
    `None` sentinel idiom; behavior for all existing callers is unchanged.
    """

    def __init__(self, ws_client):
        # ws_client: a Workspace service client used for all object lookups.
        self.ws_client = ws_client

    def find_report_from_object(self, upa):
        """Return the report UPAs referencing the object at `upa`.

        If no referencing report exists, fall back to searching the object's
        copy source (see find_report_from_copy_source).
        """
        # TODO:
        # 1. make sure upa's real.
        # first, fetch object references (without data)
        ref_list = self.ws_client.list_referencing_objects([{"ref": upa}])[0]
        # scan the references for reports; if we find at least one, return them
        report_upas = [
            ServiceUtils.object_info_to_object(ref_info)['ref']
            for ref_info in ref_list
            if "KBaseReport.Report" in ref_info[2]
        ]
        if report_upas:
            return self.build_output(upa, report_upas)
        # no referencing report: maybe this object is a copy — search upstream
        return self.find_report_from_copy_source(upa)

    def find_report_from_copy_source(self, upa):
        """
        Fetch the info about this object. If it's a copy, run find_report_from_object on its source.
        If it's not, return an error state, or just an empty list for the upas.
        """
        obj_data = self.ws_client.get_objects2({'objects': [{'ref': upa}], 'no_data': 1})['data'][0]
        if obj_data.get('copy_source_inaccessible', 0) == 1:
            err = "No report found. This object is a copy, and its source is inaccessible."
            return self.build_output(upa, [], inaccessible=1, error=err)
        elif 'copied' in obj_data:
            # recurse into the copy source
            return self.find_report_from_object(obj_data['copied'])
        return self.build_output(upa, [])

    def build_output(self, upa, report_upas=None, inaccessible=0, error=None):
        """Assemble the result structure.

        :param upa: the queried object's UPA
        :param report_upas: list of report UPAs (defaults to an empty list)
        :param inaccessible: set non-zero when the copy source can't be read
        :param error: optional human-readable error message
        :return: dict with report_upas, object_upa, and optional
                 inaccessible/error keys
        """
        retVal = {
            "report_upas": report_upas if report_upas is not None else [],
            "object_upa": upa
        }
        if inaccessible != 0:
            retVal["inaccessible"] = inaccessible
        if error is not None:
            retVal["error"] = error
        return retVal
|
# Generated by Django 3.2.4 on 2021-08-17 01:31
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (3.2.4): creates the
    # Countries table. Kept structurally as generated so the migration
    # autodetector stays in sync.

    # First migration for this app.
    initial = True

    # No dependencies on other apps' migrations.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Countries',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('alpha2code', models.CharField(max_length=16)),
                ('capital', models.CharField(max_length=32)),
                ('population', models.IntegerField()),
                ('timezones', models.CharField(max_length=16)),
                ('flag', models.CharField(max_length=64)),
                # languages/borders appear to hold delimited lists in a single
                # string column — TODO confirm against the model definition.
                ('languages', models.CharField(max_length=256)),
                ('borders', models.CharField(max_length=256)),
            ],
        ),
    ]
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateNodePoolDetails(object):
"""
The properties that define a request to update a node pool.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateNodePoolDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this UpdateNodePoolDetails.
:type name: str
:param kubernetes_version:
The value to assign to the kubernetes_version property of this UpdateNodePoolDetails.
:type kubernetes_version: str
:param initial_node_labels:
The value to assign to the initial_node_labels property of this UpdateNodePoolDetails.
:type initial_node_labels: list[oci.container_engine.models.KeyValue]
:param quantity_per_subnet:
The value to assign to the quantity_per_subnet property of this UpdateNodePoolDetails.
:type quantity_per_subnet: int
:param subnet_ids:
The value to assign to the subnet_ids property of this UpdateNodePoolDetails.
:type subnet_ids: list[str]
:param node_config_details:
The value to assign to the node_config_details property of this UpdateNodePoolDetails.
:type node_config_details: oci.container_engine.models.UpdateNodePoolNodeConfigDetails
:param node_metadata:
The value to assign to the node_metadata property of this UpdateNodePoolDetails.
:type node_metadata: dict(str, str)
:param node_source_details:
The value to assign to the node_source_details property of this UpdateNodePoolDetails.
:type node_source_details: oci.container_engine.models.NodeSourceDetails
:param ssh_public_key:
The value to assign to the ssh_public_key property of this UpdateNodePoolDetails.
:type ssh_public_key: str
:param node_shape:
The value to assign to the node_shape property of this UpdateNodePoolDetails.
:type node_shape: str
:param node_shape_config:
The value to assign to the node_shape_config property of this UpdateNodePoolDetails.
:type node_shape_config: oci.container_engine.models.UpdateNodeShapeConfigDetails
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateNodePoolDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateNodePoolDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'name': 'str',
'kubernetes_version': 'str',
'initial_node_labels': 'list[KeyValue]',
'quantity_per_subnet': 'int',
'subnet_ids': 'list[str]',
'node_config_details': 'UpdateNodePoolNodeConfigDetails',
'node_metadata': 'dict(str, str)',
'node_source_details': 'NodeSourceDetails',
'ssh_public_key': 'str',
'node_shape': 'str',
'node_shape_config': 'UpdateNodeShapeConfigDetails',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'name': 'name',
'kubernetes_version': 'kubernetesVersion',
'initial_node_labels': 'initialNodeLabels',
'quantity_per_subnet': 'quantityPerSubnet',
'subnet_ids': 'subnetIds',
'node_config_details': 'nodeConfigDetails',
'node_metadata': 'nodeMetadata',
'node_source_details': 'nodeSourceDetails',
'ssh_public_key': 'sshPublicKey',
'node_shape': 'nodeShape',
'node_shape_config': 'nodeShapeConfig',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._name = None
self._kubernetes_version = None
self._initial_node_labels = None
self._quantity_per_subnet = None
self._subnet_ids = None
self._node_config_details = None
self._node_metadata = None
self._node_source_details = None
self._ssh_public_key = None
self._node_shape = None
self._node_shape_config = None
self._freeform_tags = None
self._defined_tags = None
@property
def name(self):
"""
Gets the name of this UpdateNodePoolDetails.
The new name for the cluster. Avoid entering confidential information.
:return: The name of this UpdateNodePoolDetails.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this UpdateNodePoolDetails.
The new name for the cluster. Avoid entering confidential information.
:param name: The name of this UpdateNodePoolDetails.
:type: str
"""
self._name = name
@property
def kubernetes_version(self):
"""
Gets the kubernetes_version of this UpdateNodePoolDetails.
The version of Kubernetes to which the nodes in the node pool should be upgraded.
:return: The kubernetes_version of this UpdateNodePoolDetails.
:rtype: str
"""
return self._kubernetes_version
@kubernetes_version.setter
def kubernetes_version(self, kubernetes_version):
"""
Sets the kubernetes_version of this UpdateNodePoolDetails.
The version of Kubernetes to which the nodes in the node pool should be upgraded.
:param kubernetes_version: The kubernetes_version of this UpdateNodePoolDetails.
:type: str
"""
self._kubernetes_version = kubernetes_version
@property
def initial_node_labels(self):
"""
Gets the initial_node_labels of this UpdateNodePoolDetails.
A list of key/value pairs to add to nodes after they join the Kubernetes cluster.
:return: The initial_node_labels of this UpdateNodePoolDetails.
:rtype: list[oci.container_engine.models.KeyValue]
"""
return self._initial_node_labels
@initial_node_labels.setter
def initial_node_labels(self, initial_node_labels):
"""
Sets the initial_node_labels of this UpdateNodePoolDetails.
A list of key/value pairs to add to nodes after they join the Kubernetes cluster.
:param initial_node_labels: The initial_node_labels of this UpdateNodePoolDetails.
:type: list[oci.container_engine.models.KeyValue]
"""
self._initial_node_labels = initial_node_labels
@property
def quantity_per_subnet(self):
"""
Gets the quantity_per_subnet of this UpdateNodePoolDetails.
The number of nodes to have in each subnet specified in the subnetIds property. This property is deprecated,
use nodeConfigDetails instead. If the current value of quantityPerSubnet is greater than 0, you can only
use quantityPerSubnet to scale the node pool. If the current value of quantityPerSubnet is equal to 0 and
the current value of size in nodeConfigDetails is greater than 0, before you can use quantityPerSubnet,
you must first scale the node pool to 0 nodes using nodeConfigDetails.
:return: The quantity_per_subnet of this UpdateNodePoolDetails.
:rtype: int
"""
return self._quantity_per_subnet
@quantity_per_subnet.setter
def quantity_per_subnet(self, quantity_per_subnet):
"""
Sets the quantity_per_subnet of this UpdateNodePoolDetails.
The number of nodes to have in each subnet specified in the subnetIds property. This property is deprecated,
use nodeConfigDetails instead. If the current value of quantityPerSubnet is greater than 0, you can only
use quantityPerSubnet to scale the node pool. If the current value of quantityPerSubnet is equal to 0 and
the current value of size in nodeConfigDetails is greater than 0, before you can use quantityPerSubnet,
you must first scale the node pool to 0 nodes using nodeConfigDetails.
:param quantity_per_subnet: The quantity_per_subnet of this UpdateNodePoolDetails.
:type: int
"""
self._quantity_per_subnet = quantity_per_subnet
@property
def subnet_ids(self):
"""
Gets the subnet_ids of this UpdateNodePoolDetails.
The OCIDs of the subnets in which to place nodes for this node pool. This property is deprecated,
use nodeConfigDetails instead. Only one of the subnetIds or nodeConfigDetails
properties can be specified.
:return: The subnet_ids of this UpdateNodePoolDetails.
:rtype: list[str]
"""
return self._subnet_ids
@subnet_ids.setter
def subnet_ids(self, subnet_ids):
"""
Sets the subnet_ids of this UpdateNodePoolDetails.
The OCIDs of the subnets in which to place nodes for this node pool. This property is deprecated,
use nodeConfigDetails instead. Only one of the subnetIds or nodeConfigDetails
properties can be specified.
:param subnet_ids: The subnet_ids of this UpdateNodePoolDetails.
:type: list[str]
"""
self._subnet_ids = subnet_ids
@property
def node_config_details(self):
"""
Gets the node_config_details of this UpdateNodePoolDetails.
The configuration of nodes in the node pool. Only one of the subnetIds or nodeConfigDetails
properties should be specified. If the current value of quantityPerSubnet is greater than 0, the node
pool may still be scaled using quantityPerSubnet. Before you can use nodeConfigDetails,
you must first scale the node pool to 0 nodes using quantityPerSubnet.
:return: The node_config_details of this UpdateNodePoolDetails.
:rtype: oci.container_engine.models.UpdateNodePoolNodeConfigDetails
"""
return self._node_config_details
@node_config_details.setter
def node_config_details(self, node_config_details):
"""
Sets the node_config_details of this UpdateNodePoolDetails.
The configuration of nodes in the node pool. Only one of the subnetIds or nodeConfigDetails
properties should be specified. If the current value of quantityPerSubnet is greater than 0, the node
pool may still be scaled using quantityPerSubnet. Before you can use nodeConfigDetails,
you must first scale the node pool to 0 nodes using quantityPerSubnet.
:param node_config_details: The node_config_details of this UpdateNodePoolDetails.
:type: oci.container_engine.models.UpdateNodePoolNodeConfigDetails
"""
self._node_config_details = node_config_details
@property
def node_metadata(self):
"""
Gets the node_metadata of this UpdateNodePoolDetails.
A list of key/value pairs to add to each underlying OCI instance in the node pool on launch.
:return: The node_metadata of this UpdateNodePoolDetails.
:rtype: dict(str, str)
"""
return self._node_metadata
@node_metadata.setter
def node_metadata(self, node_metadata):
"""
Sets the node_metadata of this UpdateNodePoolDetails.
A list of key/value pairs to add to each underlying OCI instance in the node pool on launch.
:param node_metadata: The node_metadata of this UpdateNodePoolDetails.
:type: dict(str, str)
"""
self._node_metadata = node_metadata
@property
def node_source_details(self):
"""
Gets the node_source_details of this UpdateNodePoolDetails.
Specify the source to use to launch nodes in the node pool. Currently, image is the only supported source.
:return: The node_source_details of this UpdateNodePoolDetails.
:rtype: oci.container_engine.models.NodeSourceDetails
"""
return self._node_source_details
@node_source_details.setter
def node_source_details(self, node_source_details):
"""
Sets the node_source_details of this UpdateNodePoolDetails.
Specify the source to use to launch nodes in the node pool. Currently, image is the only supported source.
:param node_source_details: The node_source_details of this UpdateNodePoolDetails.
:type: oci.container_engine.models.NodeSourceDetails
"""
self._node_source_details = node_source_details
@property
def ssh_public_key(self):
"""
Gets the ssh_public_key of this UpdateNodePoolDetails.
The SSH public key to add to each node in the node pool on launch.
:return: The ssh_public_key of this UpdateNodePoolDetails.
:rtype: str
"""
return self._ssh_public_key
@ssh_public_key.setter
def ssh_public_key(self, ssh_public_key):
"""
Sets the ssh_public_key of this UpdateNodePoolDetails.
The SSH public key to add to each node in the node pool on launch.
:param ssh_public_key: The ssh_public_key of this UpdateNodePoolDetails.
:type: str
"""
self._ssh_public_key = ssh_public_key
@property
def node_shape(self):
"""
Gets the node_shape of this UpdateNodePoolDetails.
The name of the node shape of the nodes in the node pool used on launch.
:return: The node_shape of this UpdateNodePoolDetails.
:rtype: str
"""
return self._node_shape
@node_shape.setter
def node_shape(self, node_shape):
"""
Sets the node_shape of this UpdateNodePoolDetails.
The name of the node shape of the nodes in the node pool used on launch.
:param node_shape: The node_shape of this UpdateNodePoolDetails.
:type: str
"""
self._node_shape = node_shape
@property
def node_shape_config(self):
"""
Gets the node_shape_config of this UpdateNodePoolDetails.
Specify the configuration of the shape to launch nodes in the node pool.
:return: The node_shape_config of this UpdateNodePoolDetails.
:rtype: oci.container_engine.models.UpdateNodeShapeConfigDetails
"""
return self._node_shape_config
@node_shape_config.setter
def node_shape_config(self, node_shape_config):
"""
Sets the node_shape_config of this UpdateNodePoolDetails.
Specify the configuration of the shape to launch nodes in the node pool.
:param node_shape_config: The node_shape_config of this UpdateNodePoolDetails.
:type: oci.container_engine.models.UpdateNodeShapeConfigDetails
"""
self._node_shape_config = node_shape_config
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateNodePoolDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this UpdateNodePoolDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateNodePoolDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this UpdateNodePoolDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateNodePoolDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this UpdateNodePoolDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateNodePoolDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this UpdateNodePoolDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
    # Delegate to the OCI SDK helper that renders the model's attributes
    # as a flat, human-readable dict-like string.
    return formatted_flat_dict(self)
def __eq__(self, other):
    """Two instances are equal when their attribute dictionaries match."""
    return other is not None and self.__dict__ == other.__dict__
def __ne__(self, other):
    # Explicit inverse of __eq__ (kept for Python 2 compatibility, where
    # __ne__ is not derived automatically).
    return not self == other
|
"""adding_error_codes_table
Revision ID: 6ba89cd39cc8
Revises: 8eac44955fb6
Create Date: 2020-04-14 12:36:57.094459
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '6ba89cd39cc8'        # id of this migration
down_revision = '8eac44955fb6'   # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``error_code`` lookup table and seed it with every
    user-facing payment-API error (code -> title/detail shown to users).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # create_table returns the Table object so it can be reused by bulk_insert.
    error_code_table = op.create_table('error_code',
        sa.Column('code', sa.String(length=50), nullable=False),
        sa.Column('title', sa.String(length=100), nullable=True),
        sa.Column('detail', sa.String(length=500), nullable=True),
        sa.PrimaryKeyConstraint('code')
    )
    # Seed rows. The multi-line details below rely on implicit string
    # concatenation; the embedded \n breaks are part of the displayed text.
    op.bulk_insert(
        error_code_table,
        [
            {
                'code': 'INVALID_CORP_OR_FILING_TYPE',
                'title': 'Invalid Request',
                'detail': 'Invalid Corp Type or Filing Type.'
            },
            {
                'code': 'INVALID_PAYMENT_ID',
                'title': 'Invalid Request',
                'detail': 'Invalid Payment Identifier.'
            },
            {
                'code': 'INVALID_TRANSACTION',
                'title': 'Invalid Request',
                'detail': 'Transaction is already completed.'
            },
            {
                'code': 'INVALID_REDIRECT_URI',
                'title': 'Invalid Request',
                'detail': 'Invalid redirect url.'
            },
            {
                'code': 'INVALID_TRANSACTION_ID',
                'title': 'Invalid Request',
                'detail': 'Invalid transaction identifier.'
            },
            {
                'code': 'INVALID_ACCOUNT_ID',
                'title': 'Invalid Request',
                'detail': 'Invalid account identifier.'
            },
            {
                'code': 'COMPLETED_PAYMENT',
                'title': 'Invalid Request',
                'detail': 'Payment is already completed.'
            },
            {
                'code': 'CANCELLED_PAYMENT',
                'title': 'Invalid Request',
                'detail': 'Payment is already cancelled.'
            },
            {
                'code': 'INVALID_INVOICE_ID',
                'title': 'Invalid Request',
                'detail': 'Invalid invoice identifier.'
            },
            {
                'code': 'FEE_OVERRIDE_NOT_ALLOWED',
                'title': 'Invalid Request',
                'detail': 'Fee override is not allowed.'
            },
            {
                'code': 'INCOMPLETE_ACCOUNT_SETUP',
                'title': 'Invalid Request',
                'detail': 'Premium account setup is incomplete.'
            },
            {
                'code': 'BCOL_ACCOUNT_CLOSED',
                'title': 'BC Online account closed',
                'detail': 'This BC Online account has been closed. '
                          'Please contact the help desk. '
                          '\nSERVICE BC HELP DESK: '
                          '\nToll-free: 1-800-663-6102 (Canada and USA only)'
                          '\nFax: (250) 952-6115'
                          '\nEmail: bcolhelp@gov.bc.ca.'
            },
            {
                'code': 'BCOL_USER_REVOKED',
                'title': 'BC Online user revoked',
                'detail': 'This BC Online user has been revoked. '
                          'Please contact the help desk. '
                          '\nSERVICE BC HELP DESK: '
                          '\nToll-free: 1-800-663-6102 (Canada and USA only)'
                          '\nFax: (250) 952-6115'
                          '\nEmail: bcolhelp@gov.bc.ca.'
            },
            {
                'code': 'BCOL_ACCOUNT_REVOKED',
                'title': 'BC Online account revoked',
                'detail': 'This BC Online account has been revoked. '
                          'Please contact the help desk. '
                          '\nSERVICE BC HELP DESK: '
                          '\nToll-free: 1-800-663-6102 (Canada and USA only)'
                          '\nFax: (250) 952-6115'
                          '\nEmail: bcolhelp@gov.bc.ca.'
            },
            {
                'code': 'BCOL_UNAVAILABLE',
                'title': 'BC Online system not available',
                'detail': 'BC Online system is not available. Please try again later.'
            },
            {
                'code': 'BCOL_ERROR',
                'title': 'Error',
                'detail': 'An error occurred during the BC Online transaction. '
                          'Please contact the help desk. '
                          '\nSERVICE BC HELP DESK: '
                          '\nToll-free: 1-800-663-6102 (Canada and USA only)'
                          '\nFax: (250) 952-6115'
                          '\nEmail: bcolhelp@gov.bc.ca.'
            }
        ]
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``error_code`` table, reversing upgrade() (seed data is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('error_code')
    # ### end Alembic commands ###
|
# from app.table import table
#
#
# from app import data
# import csv
#
# a = []
#
# with open('tickers.csv') as csvfile:
# res = csv.reader(csvfile)
# for i in res:
# a.append(i[1])
#
# print(a)
# |
# -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
from py_dss_interface.models.Solution.SolutionF import SolutionF
from py_dss_interface.models.Solution.SolutionI import SolutionI
from py_dss_interface.models.Solution.SolutionS import SolutionS
from py_dss_interface.models.Solution.SolutionV import SolutionV
class Solution(SolutionI, SolutionF, SolutionS, SolutionV):
    """
    This interface implements the Solution (ISolution) interface of OpenDSS by declaring 4 procedures for accessing
    the different properties included in this interface: SolutionI, SolutionF, SolutionS, SolutionV.
    """
    # Pure aggregator: all behavior comes from the four mixin bases above.
    pass
|
import pyodbc
from math import ceil
def string_conexao():
    """Build the ODBC connection string for the SQL Server database.

    :return: a semicolon-separated DRIVER/SERVER/DATABASE/USER ID/PASSWORD string.
    """
    # Connection settings (placeholders; replace with real credentials).
    partes = {
        'DRIVER': '{SQL Server}',
        'SERVER': 'SERVIDOR',
        'DATABASE': 'BANCO_DE_DADOS',
        'USER ID': 'usuario',
        'PASSWORD': 'senha',
    }
    return ';'.join(f'{chave}={valor}' for chave, valor in partes.items())
def conectar(string_conexao):
    """Open a pyodbc connection using the given connection string.

    autocommit=True so each statement takes effect without an explicit commit.
    NOTE(review): the parameter name shadows the module-level
    string_conexao() function — harmless here, but confusing.
    """
    conexao = pyodbc.connect(string_conexao, autocommit=True)
    return conexao
def executa_sql(conexao, query, *args):
    """Execute *query* on *conexao*, optionally with positional parameters.

    Accepts a parameterized query (e.g. ``where nome in (?)``) together with
    the matching parameter values in *args*.

    :param conexao: a pyodbc connection (used as a context manager).
    :param query: the SQL text to execute.
    :param args: optional positional parameters forwarded to execute().
    :return: the result of ``cursor.execute(...)``.
    """
    with conexao as cursor:
        # BUG FIX: *args is always a tuple, never None, so the previous
        # ``if args != None`` test was unconditionally true. Test for a
        # non-empty tuple instead.
        if args:
            return cursor.execute(query, *args)
        return cursor.execute(query)
def txt_query(arq_caminho_completo):
    """Return the full text contents of the SQL file at *arq_caminho_completo*."""
    with open(arq_caminho_completo, 'r') as fonte:
        conteudo = fonte.read()
    return conteudo
def divide_insert(lista, inicio, passo):
    """Split *lista* into batches of at most *passo* elements.

    Used to break a large list of INSERT values into smaller lots
    (e.g. insert 1000 rows at a time: inicio=0, passo=1000).

    :param lista: the full list of values.
    :param inicio: index where the first batch starts.
    :param passo: maximum batch size.
    :return: (list of batches, total number of elements batched).
    """
    tamanho = len(lista)
    lotes = []
    total = 0
    fim = passo
    for _ in range(ceil(tamanho / passo)):
        corte = min(fim, tamanho)        # never slice past the end
        lote = lista[inicio:corte]
        total += len(lote)
        lotes.append(lote)
        inicio = corte
        fim = corte + passo
    return lotes, total
def executa_insert(cursor, banco_de_dados, tabela, *args):
    """Insert rows into ``[banco_de_dados].[dbo].[tabela]``.

    :param cursor: a pyodbc cursor (must support execute/commit).
    :param banco_de_dados: target database name.
    :param tabela: target table name.
    :param args: one string per row, each a SQL ``(v1, v2, ...)`` tuple.
    """
    # Strip stray newlines from each value tuple and join them as the
    # comma-separated VALUES list.
    colunas = ','.join([str(coluna).strip().replace(
        '\n', '').replace('\r', '') for coluna in args])
    # BUG FIX: the database name was hard-coded to [RH_RPA], silently
    # ignoring the banco_de_dados parameter (truncate_table uses it).
    # Also dropped the debug print of the full query (may contain data).
    query = f'INSERT INTO [{banco_de_dados}].[dbo].[{tabela}] VALUES {colunas}'
    cursor.execute(query)
    cursor.commit()
def truncate_table(cursor, banco_de_dados, tabela):
    """Remove all rows from ``[banco_de_dados].[dbo].[tabela]``."""
    comando = f'TRUNCATE TABLE [{banco_de_dados}].[dbo].[{tabela}]'
    cursor.execute(comando)
    cursor.commit()
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

# Backwards-compatibility shim: re-export the symbol from its new home so
# existing imports of this module keep working.
from pants.testutil.pexrc_util import (
    setup_pexrc_with_pex_python_path as setup_pexrc_with_pex_python_path,
)  # noqa

from pants_test.deprecated_testinfra import deprecated_testinfra_module

# Emit a deprecation warning directing users to the new module path.
deprecated_testinfra_module('pants.testutil.pexrc_util')
|
import json
import requests
import pytz
from datetime import datetime
from beer_search_v2.models import Product, ProductType, ContainerType
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from beer_search_v2.utils import get_country_instance, get_alcohol_category_instance
from .update_stock import Command as StockUpdateCommand
class Command(BaseCommand):
    """Import the beer catalogue from vinbudin.is (ATVR, the Icelandic
    state liquor store) into the local database.

    Fetches the product list via the site's internal search API, then
    creates/updates Product and ProductType rows and flags availability.
    """

    def __init__(self):
        # When True, progress information is printed while fetching/updating.
        self.verbose = True
        super().__init__()

    def get_data(self):
        """
        Steals product data from the internal API of vinbudin.is.
        :return: A list of products, as a JSON array. Format not particularly well defined.
        """
        domain = "http://www.vinbudin.is"
        location = "/addons/origo/module/ajaxwebservices/search.asmx/DoSearch"
        url = domain + location
        # Mandatory headers, as derived from website request info.
        headers = {
            "host": "www.vinbudin.is",
            "connection": "keep-alive",
            "cache-control": "max-age=0",
            "accept": "application/json, text/javascript, */*; q=0.01",
            "x-requested-with": "XMLHttpRequest",
            "user-agent": "Bjorleit/0.2 (+http://bjorleit.info/)",
            "content-type": "application/json; charset=utf-8",
            "accept-encoding": "gzip, deflate, sdch",
            "accept-language": "en-GB,en;q=0.8,is;q=0.6,en-US;q=0.4",
        }
        # High values break the API.
        items_per_iteration = 100
        request_params = {
            "category": "beer",  # This whole k-v pair can be omitted, to search all products
            "skip": 0,
            "count": items_per_iteration,
            "orderBy": "name asc"
        }
        accumulated_list = []
        it_count = 0
        max_it = 100  # Safeguard, real stop is at bottom of loop.
        while it_count <= max_it:
            it_count += 1
            json_response = requests.get(
                url,
                headers=headers,
                params=request_params
            ).json()
            # The whole thing is apparently a string inside a JSONObject.
            data = json.loads(json_response["d"])
            data = data["data"]  # Nesting fun
            num_fetched = len(data)
            if self.verbose:
                print("Fetched " + str(num_fetched) + " products")
            if num_fetched > 0:
                accumulated_list.extend(data)
                # Moving on
                request_params["skip"] += items_per_iteration
            else:
                break
        return accumulated_list

    @classmethod
    def prepare_products_for_update(cls):
        # Marking all existing products from ÁTVR as not available until proven wrong.
        for product in Product.objects.filter(atvr_id__isnull=False).all():
            product.available_in_atvr = False
            product.save()

    @classmethod
    def clean_atvr_id(cls, atvr_id):
        """
        ATVR ids are strings, but the API returns an integer.
        The integers must be zero-padded and converted to strings for urls.
        """
        # str.zfill replaces the previous manual while-loop padding.
        return str(atvr_id).zfill(5)

    @classmethod
    def clean_date(cls, raw_date):
        """
        Converts the API's date format to a Python-friendly (UTC-aware) datetime.
        """
        first_seen_at = datetime.strptime(raw_date, "%Y-%m-%dT%H:%M:%S")
        return pytz.utc.localize(first_seen_at)

    @classmethod
    def update_product_type(cls, product, json_object):
        """
        Each product is an instance of a particular product type, this common info is stored separately.
        """
        if not product.product_type_id:
            try:
                product_type = ProductType.objects.get(name=product.name)
                print("Associated {} with a pre-existing ProductType by name".format(product.name))
            except ObjectDoesNotExist:
                product_type = ProductType()
                product_type.name = product.name
                product_type.abv = json_object["ProductAlchoholVolume"]
                product_type.country = get_country_instance(json_object["ProductCountryOfOrigin"])
                product_type.alcohol_category = get_alcohol_category_instance(json_object["ProductCategory"]["name"])
                product_type.save()
                print("Created new product type: {0}".format(product_type.name))
            product.product_type = product_type
            product.save()

    @classmethod
    def find_container_type(cls, atvr_name):
        """
        The ATVR database contains container info with non-human-friendly
        names. This function finds the appropriate Bjórleit Container type.
        """
        if atvr_name == "FL.":
            container_type = ContainerType.objects.get(name="Flaska")
        elif atvr_name == "DS.":
            container_type = ContainerType.objects.get(name="Dós")
        elif atvr_name == "KÚT.":
            container_type = ContainerType.objects.get(name="Kútur")
        elif "ASKJA" in atvr_name:
            container_type = ContainerType.objects.get(name="Gjafaaskja")
        else:
            container_type = ContainerType.objects.get(name="Ótilgreint")
        return container_type

    def update_products(self, product_list):
        """Create or refresh a Product row for every JSON object fetched."""
        for json_object in product_list:
            product_id = self.clean_atvr_id(json_object["ProductID"])
            product = self.get_product_instance(json_object, product_id)

            if not product.container_id:
                raw_container_name = json_object["ProductContainerType"]
                product.container = self.find_container_type(raw_container_name)

            self.update_product_type(product, json_object)

            # Available iff any store reports stock (bool() replaces "not not").
            product.available_in_atvr = bool(product.atvr_stock)

            if not product.first_seen_at:
                product.first_seen_at = self.clean_date(json_object["ProductDateOnMarket"])

            new_price = json_object["ProductPrice"]
            if product.price != new_price and self.verbose:
                print("Price change for {}, changing from {} to {} krónur".format(
                    str(product), product.price, new_price)
                )
            product.price = new_price  # We always update the price
            product.save()

    @classmethod
    def get_product_instance(cls, json_object, atvr_id):
        try:  # Checking if we've found the product previously
            product = Product.objects.get(atvr_id=atvr_id)
        except ObjectDoesNotExist:
            product = Product()
            product.atvr_id = atvr_id
            cls.initialize_product(product, json_object)
        return product

    @classmethod
    def initialize_product(cls, product, json_object):
        """Populate a brand-new Product from its API JSON object."""
        print("New product created: " + json_object["ProductName"])
        product.name = json_object["ProductName"]
        product.price = json_object["ProductPrice"]
        product.volume = int(json_object["ProductBottledVolume"])
        product.first_seen_at = cls.clean_date(json_object["ProductDateOnMarket"])
        product.temporary = json_object["ProductIsTemporaryOnSale"]
        stock_information = StockUpdateCommand().get_product_data(product.atvr_id)
        product.atvr_stock = stock_information["stores"]
        return product

    def handle(self, *args, **options):
        """Entry point for ``manage.py``: fetch, then update the database."""
        try:
            product_list = self.get_data()
        # BUG FIX: requests raises requests.exceptions.ConnectionError,
        # which is NOT a subclass of the builtin ConnectionError the old
        # code caught — network failures previously escaped this handler.
        except requests.exceptions.ConnectionError:
            print("Unable to connect to vinbudin.is")
            product_list = []
        if len(product_list) > 0:
            self.prepare_products_for_update()
            self.update_products(product_list)
            print("It is now recommended to run update_stock")
|
import py
from rpython.translator.cli.test.runtest import CliTest
import rpython.translator.oosupport.test_template.dict as oodict
class TestCliDict(CliTest, oodict.BaseTestDict):
    """CLI-backend specialization of the shared oosupport dict tests.

    Overrides skip the dict shapes the CLI backend cannot translate and
    adds Void-key/value corner cases. (Python 2 code: uses xrange.)
    """

    def test_dict_of_dict(self):
        py.test.skip("CLI doesn't support recursive dicts")

    def test_recursive(self):
        py.test.skip("CLI doesn't support recursive dicts")

    def test_dict_of_void_special_case(self):
        # Values annotated as Void: the dict degenerates to key bookkeeping.
        def fn(n):
            d = {}
            for i in xrange(n):
                d[i] = None
            return d[0]
        assert self.interpret(fn, [2]) is None

    def test_dict_with_void_key(self):
        # Keys annotated as Void: only presence/absence is observable.
        def fn(flag):
            d = {}
            if flag:
                d[None] = flag
            return bool(d)
        res = self.interpret(fn, [42])
        assert res is True

    ## XXX: it fails because of a bug in the annotator, which thinks the
    ## last line always raises
    ## def test_dict_with_void_key_pbc(self):
    ## d = {}
    ## def fn(flag):
    ## if flag:
    ## d[None] = flag
    ## return d[None]
    ## res = self.interpret(fn, [42], backendopt=False)
    ## assert res == 42
class TestCliEmptyDict(CliTest, oodict.BaseTestEmptyDict):
    # Runs the shared empty-dict test suite unchanged on the CLI backend.
    pass
class TestCliConstantDict(CliTest, oodict.BaseTestConstantDict):
    # Runs the shared constant-dict test suite unchanged on the CLI backend.
    pass
|
import numpy as np
from faker import Faker

# Module-level Faker instance shared by generate_nodes() for name generation.
fake = Faker()
# generate nodes
def generate_nodes(N, M):
'''
Each node has 4 attributes:
-> Year of Birth(Integer)
-> Name(String)
-> Gender(M/F)
-> Num Posts(int)
-> Num Friends(int)
-> Unique ID(int)
'''
birth_years = [i for i in range(1980, 2015)]
genders = ["M", "F"]
colors = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
num_posts = list(range(30, 400))
num_friends = list(range(50, 1000))
graph_nodes = []
graph_edges = []
for i in range(N):
year_of_birth = np.random.choice(birth_years)
gender = np.random.choice(genders)
if gender == "M":
name = fake.first_name_male()
else:
name = fake.first_name_female()
posts = np.random.choice(num_posts)
friends = np.random.choice(num_friends)
new_node = (year_of_birth, name, gender, posts, friends, i + 1)
graph_nodes.append(new_node)
present_nodes = []
i = 0
while i < M:
node1 = np.random.randint(1, N + 1)
node2 = np.random.randint(1, N + 1)
if (node1, node2) in present_nodes:
continue
if node1 == node2:
continue
color = np.random.choice(colors)
present_nodes.append((node1, node2))
new_edge = (node1, node2, color)
graph_edges.append(new_edge)
i += 1
return len(colors), graph_nodes, graph_edges
def main():
    """Generate a random graph and print it: header line, nodes, then edges."""
    node_count = 8000
    edge_count = 40000
    color_count, nodes, edges = generate_nodes(node_count, edge_count)
    print(node_count, edge_count, color_count)
    for node in nodes:
        print(*node)
    for edge in edges:
        print(*edge)


if __name__ == "__main__":
    main()
|
class Item:
    """A stackable inventory item with a per-unit dollar value."""

    def __init__(self, name, description, amount, individual_value):
        self.name = name
        self.description = description
        self.amount = amount                      # units in the stack
        self.individual_value = individual_value  # dollars per unit

    @property
    def worth(self):
        """Total dollar value of the whole stack, formatted as '$X.XX'."""
        return f'${self.amount * self.individual_value:.2f}'

    def sell(self):
        """Interactively sell part of the stack (console I/O)."""
        if self.amount >= 1:
            print('How many do you want to sell?')
            amt = int(input('amt > '))
            # BUG FIX: clamp so the player cannot sell more than they own.
            amt = min(amt, self.amount)
            # BUG FIX: the confirmation previously showed self.amount
            # (the whole stack) instead of the quantity entered.
            print(f'Are you sure you want to sell {amt} {self.name} for ${self.individual_value * amt:.2f}?')
            confirm = input('[y/n] > ')
            if confirm == 'y':
                self.amount -= amt
                print(f'{amt} {self.name} sold for ${amt * self.individual_value:.2f}!')

    def add_to_inventory(self, inventory):
        """Append this item to *inventory* if a slot is free, else warn."""
        if len(inventory.items) < inventory.capacity:
            inventory.items.append(self)
            print(f'x{self.amount} {self.name} added to your Inventory')
        else:
            print('No room for more items...')
class Inventory:
    """A fixed-capacity container of Item stacks."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of item stacks
        self.items = []

    def show(self):
        """Print a numbered list of the items currently held."""
        index = 1
        for item in self.items:
            print(str(f'{index} -> [x{item.amount}] {item.name}'))
            index += 1

    def drop_item(self):
        """Interactively drop items until the player enters 0 (console I/O).

        Recurses after each drop, mirroring the original control flow.
        """
        print('\nWhich item do you want to drop? ["0" to Quit]')
        self.show()
        i = int(input('\nNº > '))
        if i == 0:
            print('\nClosing the Inventory...')
            quit()
        item = self.items[i - 1]
        if item.amount == 1:
            amt = 1
        else:
            print(f'You have {item.amount} of this, how many do you want to drop?')
            # BUG FIX: clamp so we never drop more than we hold.
            amt = min(int(input('amt > ')), item.amount)
        item.amount -= amt
        if item.amount <= 0:
            # BUG FIX: the original called self.items.pop(item), but
            # list.pop takes an index — that raised TypeError.
            self.items.pop(i - 1)
        print(f'Item {item.name}[x{amt}] Dropped!\nNow your Inventory is this:')
        self.show()
        self.drop_item()

    @property
    def total_worth(self):
        """Formatted dollar total of every stack in the inventory."""
        return f'\nThe inventory Total Worth is: ${sum([i.individual_value * i.amount for i in self.items]):.2f}'
# Demo script: builds a sample inventory and starts the interactive drop
# loop. NOTE: runs at import time and blocks on input() in drop_item().

# Declaring the Inventory
inventory = Inventory(6)

# Declaring some Items to Populate this Inventory
knife = Item('Knife', 'A normal sized and little rusted Knife.', 1, 25)
knife.add_to_inventory(inventory)
potion = Item('Health Potion', 'A small flask of what appears to be blood..', 12, 45)
potion.add_to_inventory(inventory)
sword = Item('Sword', 'A rusted Sword, you see some scripts on it.', 1, 548.90)
sword.add_to_inventory(inventory)
idol = Item('Cthullu Idol', 'You see a bizarre figure sitted in a little altar...', 1, 159804.60)
idol.add_to_inventory(inventory)

# Checking the Total Worth of the Inventory
print(inventory.total_worth)

# Calling the Function for dropping items.
inventory.drop_item()
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from flask import Blueprint, abort, request
from eduid_common.api.decorators import MarshalWith, UnmarshalWith
from eduid_common.api.helpers import check_magic_cookie
from eduid_common.api.messages import CommonMsg, FluxData, error_response, success_response
from eduid_common.api.schemas.base import FluxStandardAction
from eduid_userdb.exceptions import EduIDUserDBError
from eduid_webapp.signup.app import current_signup_app as current_app
from eduid_webapp.signup.helpers import (
SignupMsg,
check_email_status,
complete_registration,
remove_users_with_mail_address,
)
from eduid_webapp.signup.schemas import AccountCreatedResponse, EmailSchema, RegisterEmailSchema
from eduid_webapp.signup.verifications import (
AlreadyVerifiedException,
CodeDoesNotExist,
ProofingLogFailure,
send_verification_mail,
verify_email_code,
verify_recaptcha,
)
# Flask blueprint collecting every signup endpoint; registered by the app factory.
signup_views = Blueprint('signup', __name__, url_prefix='', template_folder='templates')
@signup_views.route('/trycaptcha', methods=['POST'])
@UnmarshalWith(RegisterEmailSchema)
@MarshalWith(AccountCreatedResponse)
def trycaptcha(email: str, recaptcha_response: str, tou_accepted: bool) -> FluxData:
    """
    Kantara requires a check for humanness even at level AL1.

    Verifies ToU acceptance and the reCaptcha response (or the testing
    backdoor), then routes the signup flow based on the e-mail address state.
    """
    if not tou_accepted:
        return error_response(message=SignupMsg.no_tou)
    recaptcha_verified = False

    # add a backdoor to bypass recaptcha checks for humanness,
    # to be used in testing environments for automated integration tests.
    if check_magic_cookie(current_app.conf):
        current_app.logger.info('Using BACKDOOR to verify reCaptcha during signup!')
        recaptcha_verified = True

    # common path with no backdoor
    if not recaptcha_verified:
        remote_ip = request.remote_addr
        if current_app.conf.recaptcha_public_key and current_app.conf.recaptcha_private_key:
            recaptcha_verified = verify_recaptcha(current_app.conf.recaptcha_private_key, recaptcha_response, remote_ip)
        else:
            current_app.logger.info('Missing configuration for reCaptcha!')

    if recaptcha_verified:
        # Renamed local from `next` (shadowed the builtin); the payload
        # key sent to the frontend is still 'next'.
        next_action = check_email_status(email)
        if next_action == 'new':
            # Workaround for failed earlier sync of user to userdb: Remove any signup_user with this e-mail address.
            remove_users_with_mail_address(email)
            send_verification_mail(email)
            return success_response(payload=dict(next=next_action), message=SignupMsg.reg_new)
        elif next_action == 'resend-code':
            return success_response(payload=dict(next=next_action))
        elif next_action == 'address-used':
            current_app.stats.count(name='address_used_error')
            return error_response(payload=dict(next=next_action), message=SignupMsg.email_used)
    return error_response(message=SignupMsg.no_recaptcha)
@signup_views.route('/resend-verification', methods=['POST'])
@UnmarshalWith(EmailSchema)
@MarshalWith(FluxStandardAction)
def resend_email_verification(email: str):
    """
    The user has not yet verified the email address.
    Send a verification message to the address so it can be verified.
    """
    current_app.logger.debug("Resend email confirmation to {!s}".format(email))
    send_verification_mail(email)
    # Counter for monitoring how often codes need to be re-sent.
    current_app.stats.count(name='resend_code')
    return success_response(message=SignupMsg.resent_success)
@signup_views.route('/verify-link/<code>', methods=['GET'])
@MarshalWith(FluxStandardAction)
def verify_link(code: str) -> FluxData:
    """Verify the e-mailed signup code and complete the registration.

    Maps each verification failure onto a distinct Flux error response.
    NOTE(review): the except order matters if the specific exceptions
    subclass EduIDUserDBError — confirm the hierarchy before reordering.
    """
    try:
        user = verify_email_code(code)
    except CodeDoesNotExist:
        return error_response(payload=dict(status='unknown-code'), message=SignupMsg.unknown_code)
    except AlreadyVerifiedException:
        return error_response(payload=dict(status='already-verified'), message=SignupMsg.already_verified)
    except ProofingLogFailure:
        return error_response(message=CommonMsg.temp_problem)
    except EduIDUserDBError:
        return error_response(payload=dict(status='unknown-code'), message=SignupMsg.unknown_code)
    return complete_registration(user)
@signup_views.route('/get-code', methods=['GET'])
def get_email_code():
    """
    Backdoor to get the email verification code in the staging or dev environments
    """
    # Only honoured when the magic-cookie backdoor is enabled; any other
    # request (or any failure) falls through to a 400 below.
    try:
        if check_magic_cookie(current_app.conf):
            email = request.args.get('email')
            signup_user = current_app.private_userdb.get_user_by_pending_mail_address(email)
            code = signup_user.pending_mail_address.verification_code
            return code
    except Exception:
        # Deliberately broad: never leak codes on error, just log and 400.
        current_app.logger.exception("Someone tried to use the backdoor to get the email verification code for signup")
    abort(400)
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer that computes a moving average of the variables.
Empirically it has been found that using the moving average of the trained
parameters of a deep network is better than using its trained parameters
directly. This optimizer allows you to compute this moving average and swap the
variables at save time so that any code outside of the training loop will use by
default the averaged values instead of the original ones.
Example of usage:
```python
// Encapsulate your favorite optimizer (here the momentum one)
// inside the MovingAverageOptimizer.
opt = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum)
opt = tf.contrib.opt.MovingAverageOptimizer(opt)
// Then create your model and all its variables.
model = build_model()
// Add the training op that optimizes using opt.
// This needs to be called before swapping_saver().
opt.minimize(cost, var_list)
// Then create your saver like this:
saver = opt.swapping_saver()
// Pass it to your training loop.
slim.learning.train(
model,
...
saver=saver)
```
Note that for evaluation, the normal saver should be used instead of
swapping_saver().
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer
from tensorflow.python.training import saver
class MovingAverageOptimizer(optimizer.Optimizer):
    """Optimizer wrapper that maintains a moving average of parameters."""

    def __init__(self, opt, average_decay=0.9999, sequential_update=True):
        """Construct a new MovingAverageOptimizer.

        Args:
          opt: A tf.Optimizer that will be used to compute and apply gradients.
          average_decay: Float.  Decay to use to maintain the moving averages
                         of trained variables.
                         See tf.train.ExponentialMovingAverage for details.
          sequential_update: Bool. If False, will compute the moving average at the
                             same time as the model is updated, potentially doing
                             benign data races.
                             If True, will update the moving average after gradient
                             updates.
        """
        self._optimizer = opt
        self._ema = moving_averages.ExponentialMovingAverage(average_decay)
        # Populated by apply_gradients(); maps each variable's op name to its
        # average (and vice versa) so swapping_saver() can swap them at save time.
        self._variable_map = None
        self._sequential_update = sequential_update

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        # Apply the wrapped optimizer's update first.
        train_op = self._optimizer.apply_gradients(
            grads_and_vars, global_step=global_step, name=name)
        # Only variables that actually received a gradient get an average.
        var_list = [x[1] for x in grads_and_vars if x[0] is not None]
        self._variable_map = {}
        if self._sequential_update:
            # Force the EMA update to run strictly after the gradient update.
            with ops.control_dependencies([train_op]):
                ma_op = self._ema.apply(var_list)
        else:
            # EMA may run concurrently with the update (benign data race).
            ma_op = self._ema.apply(var_list)

        # Record the bidirectional variable <-> average mapping used by
        # swapping_saver().
        for v in var_list:
            v_avg = self._ema.average(v)
            self._variable_map[v.op.name] = v_avg
            self._variable_map[v_avg.op.name] = v
        return control_flow_ops.group(train_op, ma_op, name="train_with_avg")

    def swapping_saver(self, var_list=None, name='swapping_saver', **kwargs):
        """Create a saver swapping moving averages and variables.

        You should use this saver during training.  It will save the moving averages
        of the trained parameters under the original parameter names.  For
        evaluations or inference you should use a regular saver and it will
        automatically use the moving averages for the trained variable.

        You must call this function after all variables have been created and after
        you have called Optimizer.minimize().

        Args:
          var_list: List of variables to save, as per `Saver()`.
                    If set to None, will save all the variables that have been
                    created before this call.
          name: The name of the saver.
          **kwargs: Keyword arguments of `Saver()`.

        Returns:
          A `tf.Saver` object.

        Raises:
          RuntimeError: If apply_gradients or minimize has not been called before.
        """
        if self._variable_map is None:
            raise RuntimeError('Must call apply_gradients or minimize before '
                               'creating the swapping_saver')
        if var_list is None:
            var_list = variables.all_variables()
        if not isinstance(var_list, dict):
            var_list = saver.BaseSaverBuilder.OpListToDict(var_list)
        # Now swap variables and moving averages
        swapped_var_list = {}
        for k, v in six.iteritems(var_list):
            v_swap = self._variable_map.get(v.op.name, None)
            if v_swap:
                swapped_var_list[k] = v_swap
            else:
                # Variables without an average (no gradient) are saved as-is.
                swapped_var_list[k] = v
        # Build the swapping saver.
        return saver.Saver(swapped_var_list, name=name, **kwargs)
import os
from flask import Flask, render_template, request
import solver_handler

# Single Flask application serving the breach-protocol solver UI.
app = Flask(__name__)
@app.route('/')
def home_page():
    """Render the empty hexdump entry form."""
    # NOTE(review): '/' is also registered below with methods=['GET', 'POST'];
    # two rules on the same path make GET dispatch ambiguous — confirm intended.
    return render_template('enter_hexdump.html')
# route and function to handle the upload page
@app.route('/', methods=['GET', 'POST'])
def enter_hexdump_page():
if request.method == 'POST':
e00 = request.form['e00']
e01 = request.form['e01']
e02 = request.form['e02']
e03 = request.form['e03']
e04 = request.form['e04']
e05 = request.form['e05']
e10 = request.form['e10']
e11 = request.form['e11']
e12 = request.form['e12']
e13 = request.form['e13']
e14 = request.form['e14']
e15 = request.form['e15']
e20 = request.form['e20']
e21 = request.form['e21']
e22 = request.form['e22']
e23 = request.form['e23']
e24 = request.form['e24']
e25 = request.form['e25']
e30 = request.form['e30']
e31 = request.form['e31']
e32 = request.form['e32']
e33 = request.form['e33']
e34 = request.form['e34']
e35 = request.form['e35']
e40 = request.form['e40']
e41 = request.form['e41']
e42 = request.form['e42']
e43 = request.form['e43']
e44 = request.form['e44']
e45 = request.form['e45']
e50 = request.form['e50']
e51 = request.form['e51']
e52 = request.form['e52']
e53 = request.form['e53']
e54 = request.form['e54']
e55 = request.form['e55']
hexdump = [[e00, e01, e02, e03, e04, e05],
[e10, e11, e12, e13, e14, e15],
[e20, e21, e22, e23, e24, e25],
[e30, e31, e32, e33, e34, e35],
[e40, e41, e42, e43, e44, e45],
[e50, e51, e52, e53, e54, e55]
]
ram = request.form['ram']
d1 = request.form['d1']
d2 = request.form['d2']
d3 = request.form['d3']
solver_handler.breach(hexdump, ram, d1, d2, d3)
return render_template('enter_hexdump.html')
# Development entry point (use a WSGI server in production).
if __name__ == '__main__':
    app.run()
|
import numpy as np
def find(x):
"""
Input:
- x: the process dataset in numpy array format
Output:
- mu, sigma, theta
Ornstein–Uhlenbeck process with long-term mean mu, volatility sigma, and mean reversion speed theta.
"""
s_x = np.sum(x[:-1])
s_y = np.sum(x[1:])
s_xx = np.sum(x[:-1]**2)
s_yy = np.sum(x[1:]**2)
s_xy = np.sum(x[:-1] * x[1:])
n = len(x)-1
delta = 1
mu = ((s_y*s_xx)-(s_x*s_xy))/(n*(s_xx-s_xy)-((s_x**2)-s_x*s_y)) # Mean
theta = -(1/delta)*np.log((s_xy-mu*s_x-mu*s_y+n*mu**2)/(s_xx-2*mu*s_x+n*mu**2)) # Rate
alpha = np.exp(-theta*delta)
sigma_h = np.sqrt((1/n)*(s_yy-(2*alpha*s_xy)+((alpha**2)*s_xx)-(2*mu*(1-alpha)*(s_y-alpha*s_x))+(n*(mu**2)*(1-alpha)**2)))
sigma = np.sqrt((sigma_h**2)*(2*theta/(1-alpha**2))) #Volatility
return mu, sigma, theta
|
#
# MLDB-1713-wildcard-groupby.py
# Mathieu Bolduc, 2016-08-15
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
from mldb import mldb, MldbUnitTest, ResponseException
class MLDB1713WildcardGroupby(MldbUnitTest):  # noqa
    """Regression test for MLDB-1713: SELECT * must be rejected under GROUP BY."""

    def test_wildcard_groupby(self):
        # The server must refuse wildcard projection when grouping.
        msg = "Wildcard cannot be used with GROUP BY"
        with self.assertRaisesRegex(ResponseException, msg):
            mldb.query('select * from (select 1 as a) group by a')
# MLDB's test harness discovers and runs the MldbUnitTest subclasses above.
if __name__ == '__main__':
    mldb.run_tests()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.