content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
# encoding: utf-8
"""
@version: 0.1
@author: feikon
@license: Apache Licence
@contact: crossfirestarer@gmail.com
@site: https://github.com/feikon
@software: PyCharm
@file: problem_0001.py
@time: 2017/6/11 10:14
"""
# Problem description: generate activation codes.
# Solution steps:
# 1. Generate random codes;
# 2. Guarantee uniqueness (dictionary keys discard duplicates);
import random
import string
def generate_active_code(code_length, code_numbers=200):
    """Generate a batch of unique random activation codes.

    Each code is ``code_length`` characters long, drawn from uppercase
    letters, digits and the punctuation set ``!@#$%^&*``.  Codes are
    collected as dictionary keys, which guarantees uniqueness.

    @param code_length: number of characters per code
    @param code_numbers: how many distinct codes to generate (default 200)
    @return: dict mapping each generated code to 1
    @raise ValueError: if the alphabet cannot yield enough distinct codes
    """
    # string.digits replaces the old manual 0-9 loop; the punctuation set
    # is kept identical to the original pool.
    character_pool = (list(string.ascii_uppercase)
                      + ['!', '@', '#', '$', '%', '^', '&', '*']
                      + list(string.digits))
    # Guard against an impossible request, which previously looped forever.
    if len(character_pool) ** code_length < code_numbers:
        raise ValueError('cannot generate %d distinct codes of length %d'
                         % (code_numbers, code_length))
    result = {}
    while len(result) < code_numbers:
        code = ''.join(random.choice(character_pool) for _ in range(code_length))
        # Re-assigning a duplicate key is a no-op, so uniqueness is free.
        result[code] = 1
    return result
if __name__ == '__main__':
    # Demo run: generate the default 200 codes, 16 characters each,
    # and print them numbered.
    result = generate_active_code(16)
    # Bug fix: the loop variable was misspelled "avtive_code".
    for key_order, active_code in enumerate(result):
        print(key_order, active_code)
|
"""
Modified from https://github.com/rwightman/pytorch-image-models/blob/master/timm/utils/metrics.py
"""
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    num_classes = output.size()[1]
    # Never ask topk for more entries than there are classes.
    maxk = min(max(topk), num_classes)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    # Transpose so rows index rank (0 = best guess) and columns index samples.
    pred = pred.transpose(-1, -2)
    hits = pred.eq(target.reshape(1, -1).expand_as(pred))
    results = []
    for k in topk:
        # A sample counts as correct at k if any of its first k guesses hit.
        top_rows = hits[: min(k, maxk)]
        results.append(top_rows.reshape(-1).float().sum(0) * 100.0 / batch_size)
    return results
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Utility code to translate between python objects and AMQP encoded data
fields.
"""
from io import BytesIO
from struct import pack, calcsize, unpack
class EOF(Exception):
    """Raised by Codec.read when the underlying stream has no more data."""
    pass
class Codec:
    """Translate python values to/from AMQP wire-encoded data.

    Wraps a binary file-like ``stream``; each AMQP field type has a
    matching ``encode_<type>`` / ``decode_<type>`` pair.  Bit fields are
    buffered and packed eight to an octet before any other write.
    """

    def __init__(self, stream):
        self.stream = stream
        self.nwrote = 0  # total bytes written through this codec
        self.nread = 0   # total bytes read through this codec
        self.incoming_bits = []  # bits decoded but not yet handed out
        self.outgoing_bits = []  # bits queued but not yet packed/written

    def read(self, n):
        """Read up to ``n`` bytes; raise EOF when the stream is exhausted."""
        data = self.stream.read(n)
        if n > 0 and len(data) == 0:
            raise EOF()
        self.nread += len(data)
        return data

    def write(self, s):
        """Write raw bytes, flushing any pending bit fields first."""
        self.flushbits()
        self.stream.write(s)
        self.nwrote += len(s)

    def flush(self):
        """Flush buffered bits and the underlying stream."""
        self.flushbits()
        self.stream.flush()

    def flushbits(self):
        """Pack queued bits into octets (LSB first) and write them out."""
        if len(self.outgoing_bits) > 0:
            bytes_list = []
            index = 0
            for b in self.outgoing_bits:
                if index == 0:
                    bytes_list.append(0)
                if b:
                    bytes_list[-1] |= 1 << index
                index = (index + 1) % 8
            del self.outgoing_bits[:]
            for byte in bytes_list:
                self.encode_octet(byte)

    def pack(self, fmt, *args):
        """struct-pack ``args`` with ``fmt`` and write the result."""
        self.write(pack(fmt, *args))

    def unpack(self, fmt):
        """Read and struct-unpack one ``fmt`` record; unwrap single values."""
        size = calcsize(fmt)
        data = self.read(size)
        values = unpack(fmt, data)
        if len(values) == 1:
            return values[0]
        else:
            return values

    def encode(self, field_type, field_value):
        """Dispatch to ``encode_<field_type>``."""
        getattr(self, "encode_" + field_type)(field_value)

    def decode(self, field_type):
        """Dispatch to ``decode_<field_type>``."""
        return getattr(self, "decode_" + field_type)()

    # bit
    def encode_bit(self, o):
        """Queue a single bit; it is written when eight accumulate or on flush."""
        if o:
            self.outgoing_bits.append(True)
        else:
            self.outgoing_bits.append(False)

    def decode_bit(self):
        """Pop the next bit, refilling the buffer from one octet when empty."""
        if len(self.incoming_bits) == 0:
            bits = self.decode_octet()
            for shift in range(8):
                self.incoming_bits.append(bits >> shift & 1 != 0)
        return self.incoming_bits.pop(0)

    # octet
    def encode_octet(self, o):
        self.pack("!B", o)

    def decode_octet(self):
        return self.unpack("!B")

    # short
    def encode_short(self, o):
        self.pack("!H", o)

    def decode_short(self):
        return self.unpack("!H")

    # long
    def encode_long(self, o):
        self.pack("!L", o)

    def decode_long(self):
        return self.unpack("!L")

    # longlong
    def encode_longlong(self, o):
        self.pack("!Q", o)

    def decode_longlong(self):
        return self.unpack("!Q")

    def enc_str(self, fmt, s):
        """Write a length-prefixed string.

        Bug fix: encode *before* measuring, so the length prefix counts
        encoded bytes.  The old code packed ``len()`` of the unencoded
        text, writing a short prefix for any non-ASCII string and
        corrupting the stream on decode.
        """
        if not isinstance(s, bytes):
            s = s.encode()
        self.pack(fmt, len(s))
        self.write(s)

    def enc_bytes(self, fmt, s):
        """Write length-prefixed raw bytes."""
        size = len(s)
        self.pack(fmt, size)
        self.write(s)

    def dec_str(self, fmt):
        """Read a length-prefixed string; fall back to bytes if not UTF-8."""
        size = self.unpack(fmt)
        data = self.read(size)
        # Opportunistic binary decode
        try:
            data = data.decode()
        except UnicodeDecodeError:
            pass
        return data

    def dec_bytes(self, fmt):
        """Read length-prefixed raw bytes without decoding."""
        size = self.unpack(fmt)
        return self.read(size)

    # shortstr (1-byte length prefix)
    def encode_shortstr(self, s):
        self.enc_str("!B", s)

    def decode_shortstr(self):
        return self.dec_str("!B")

    # longstr (4-byte length prefix); dicts are encoded as tables
    def encode_longstr(self, s):
        if isinstance(s, dict):
            self.encode_table(s)
        else:
            self.enc_str("!L", s)

    def encode_longbytes(self, s):
        if isinstance(s, dict):
            self.encode_table(s)
        else:
            self.enc_bytes("!L", s)

    def decode_longstr(self):
        return self.dec_str("!L")

    def decode_longbytes(self):
        return self.dec_bytes("!L")

    # timestamp (64-bit unsigned)
    def encode_timestamp(self, o):
        self.pack("!Q", o)

    def decode_timestamp(self):
        return self.unpack("!Q")

    def _write_value(self, value):
        """Write one tagged table/array value (S=string, A=array, I=long)."""
        if isinstance(value, (str, bytes)):
            self.write(b"S")
            self.encode_longstr(value)
        elif value is None:
            self.encode_void()
        elif isinstance(value, list):
            self.write(b'A')
            self.encode_array(value)
        elif isinstance(value, int):
            self.write(b"I")
            self.encode_long(value)
        else:
            raise TypeError('Got unknown type %s for encoding' % type(value))

    # array: encoded into a scratch buffer first so the byte size is known
    def encode_array(self, arr):
        enc = BytesIO()
        codec = Codec(enc)
        for value in arr:
            codec._write_value(value)
        s = enc.getvalue()
        self.encode_long(len(s))
        self.write(s)

    # table: shortstr key + tagged value pairs, length-prefixed as a whole
    def encode_table(self, tbl):
        enc = BytesIO()
        codec = Codec(enc)
        for key, value in tbl.items():
            codec.encode_shortstr(key)
            codec._write_value(value)
        s = enc.getvalue()
        self.encode_long(len(s))
        self.write(s)

    def decode_table(self):
        """Read a table; supports S/I/F/t value tags (subset of _write_value)."""
        size = self.decode_long()
        start = self.nread
        result = {}
        while self.nread - start < size:
            key = self.decode_shortstr()
            item_type = self.read(1)
            if item_type == b"S":
                value = self.decode_longstr()
            elif item_type == b"I":
                value = self.decode_long()
            elif item_type == b"F":
                value = self.decode_table()
            elif item_type == b"t":
                value = (self.decode_octet() != 0)
            else:
                raise ValueError(repr(item_type))
            result[key] = value
        return result

    # void
    def encode_void(self):
        self.write(b"V")

    def decode_void(self):
        return None
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.test import TestCase
from markdown2 import markdown
from post.models import Channel, Question
from threads.forms import reply_form
from threads.models import Answer
class TestThreadViewsGet(TestCase):
    """GET-request tests for the thread detail view: template selection,
    context variables, and author-only controls."""

    @classmethod
    def setUpTestData(cls):
        # Two users: "digi" authors the question, "jerry" is a non-author.
        cls.digi = User.objects.create_user(username="digi", password="password")
        cls.digi.save()
        cls.jerry = User.objects.create_user(username="jerry", password="password")
        cls.jerry.save()
        # NOTE(review): author is stored as the plain string "digi", not a
        # User foreign key — confirm against post.models.Question.
        cls.q = Question.objects.create(
            title="Testing thread views.",
            metatype="discussion",
            description="Ello there mate",
            author="digi"
        )
        Channel.objects.create(name="testing1")
        Channel.objects.create(name="testing2")
        # The thread URL is built from the question id plus a slug of the title.
        slug = slugify(cls.q.title)
        cls.url = reverse('thread', args=[cls.q.id, slug])

    def test_thread_not_logged(self):
        # Anonymous GET: page renders with all partial templates and shows
        # the "please log in" banner.
        url = self.url
        expected_url = self.q.get_absolute_url()
        # The model's canonical URL must match the reversed route.
        self.assertEqual(url, expected_url)
        resp = self.client.get(url)
        # NOTE(review): leftover debug print — consider removing.
        print(resp)
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, "thread.html")
        self.assertTemplateUsed(resp, "contribute_modal.html")
        self.assertTemplateUsed(resp, "thread_replies.html")
        self.assertTemplateUsed(resp, "reply_modal.html")
        self.assertTemplateUsed(resp, "reply_author_modal.html")
        # The text which is shown when there are no logged-in users.
        self.assertContains(resp, 'Hey there, stranger? If you want to contribute to this discussion, please log in so that we can recognise you! :D')

    def test_thread_logged(self):
        # Logged-in GET on a thread with no answers yet.
        url = self.url
        self.client.login(username="digi", password="password")
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, "thread.html")
        # Context variables: the question, an empty answer set, a reply form.
        self.assertEqual(resp.context["post"], self.q)
        self.assertEqual(len(resp.context["nodes"]), 0)
        self.assertIsInstance(resp.context["form"], reply_form)

    def test_thread_delete(self):
        # The delete button must be rendered for the author only.
        q = self.q
        url = self.url
        delete_url = reverse('delete_post', args=[q.id])
        # Author sees the delete link and the author panel.
        self.client.login(username="digi", password="password")
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context["post"], q)
        self.assertContains(resp, delete_url)
        self.assertTemplateUsed(resp, "thread_author_panel.html")
        self.client.logout()
        # A different user must not see the delete link.
        self.client.login(username="jerry", password="password")
        resp = self.client.get(url)
        self.assertEqual(resp.context["post"], q)
        self.assertNotContains(resp, delete_url)

    def test_thread_nodes(self):
        # Thread rendering when answers exist.
        url = self.url
        a = Answer.objects.create(
            question=self.q,
            description="Hello answer.",
            answer_author="digi"
        )
        # Pre-render the expected descriptions the same way the view is
        # expected to (markdown2 with the same extras) before comparing.
        nodes = Answer.objects.all()
        for node in nodes:
            node.description = markdown(node.description, extras=["tables", "cuddled-lists"])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context["post"], self.q)
        # NOTE(review): compares str() of two querysets rather than their
        # elements — relies on the queryset repr including descriptions.
        self.assertEqual(str(resp.context["nodes"]), str(nodes))
"""
def test_discuss(self):
url = reverse('discuss')
expected_url = "/new/discuss"
self.assertEqual(url, expected_url)
resp = self.client.get(url)
# Should redirect to home if not logged in
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp.url, "/")
# Create an authenticated session
self.client.login(username="username", password="password")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "new.html")
# Context Variables
channels = str(Channel.objects.all())
self.assertEqual(resp.context["metatype"], "discussion")
self.assertEqual(str(resp.context["channels"]), channels)
self.assertIsInstance(resp.context["form"], post_form)
def test_submit_get(self):
url = reverse('submit', kwargs={"metatype": "question"})
# Should redirect to home in both cases
# 1) not logged in
resp = self.client.get(url)
self.assertRedirects(resp, "/")
# 2) logged in
self.client.login(username="username", password="password")
resp = self.client.get(url)
self.assertRedirects(resp, "/")
def test_submit_post_without_login(self):
url = reverse('submit', kwargs={"metatype": "question"})
# Posts without auth must redirect to home
resp = self.client.post(url, {})
self.assertRedirects(resp, "/")
def test_submit_simple_posts(self):
url = reverse('submit', kwargs={"metatype": "question"})
title = "Testing question posting"
desc = "Description of our test question."
simpledata = {
"title": title,
"description": desc,
"selectedchannels": ""
}
self.client.login(username="username", password="password")
q = Question.objects.all()
self.assertEqual(len(q), 0)
# Follow = True makes the client grab the redirected url too.
resp = self.client.post(url, simpledata, follow=True)
self.assertEqual(resp.status_code, 200)
# Tests for the newly created question.
q = Question.objects.all()
self.assertEqual(len(q), 1)
self.assertEqual(q[0].title, title)
self.assertEqual(q[0].description, desc)
self.assertEqual(q[0].channels.count(), 0)
self.assertEqual(q[0].metatype, "question")
# Test for posting of a discussion
url = reverse('submit', kwargs={"metatype": "discussion"})
resp = self.client.post(url, simpledata, follow=True)
self.assertEqual(resp.status_code, 200)
q = Question.objects.all()
self.assertEqual(len(q), 2)
self.assertEqual(q[1].metatype, "discussion")
def test_submit_post_with_channels(self):
url = reverse('submit', kwargs={"metatype": "question"})
title = "Testing post posting"
desc = "Description of our test question with channels."
simpledata = {
"title": title,
"description": desc,
"selectedchannels": "testing1"
}
self.client.login(username="username", password="password")
resp = self.client.post(url, simpledata, follow=True)
self.assertEqual(resp.status_code, 200)
q = Question.objects.first()
c = Channel.objects.get(name="testing1")
self.assertEqual(len(Question.objects.all()), 1)
self.assertEqual(q.title, title)
self.assertEqual(q.channels.count(), 1)
self.assertEqual(q.channels.all()[0], c)
# Test for posting of a discussion
url = reverse('submit', kwargs={"metatype": "discussion"})
simpledata["selectedchannels"] = "testing1,testing2"
resp = self.client.post(url, simpledata, follow=True)
self.assertEqual(resp.status_code, 200)
q = Question.objects.last()
c = str(Channel.objects.all())
self.assertEqual(q.metatype, "discussion")
self.assertEqual(q.channels.count(), 2)
self.assertEqual(str(q.channels.all()), c)
"""
class TestMarkdown(TestCase):
    """Checks that question and answer descriptions are markdown-rendered."""

    @classmethod
    def setUpTestData(cls):
        # NOTE(review): u and a are never referenced again; the calls matter
        # only for their database side effects.
        u = User.objects.create_user(username="digi", password="password")
        q = Question.objects.create(
            title="The title of the question.",
            description="# Header\n**bold**\n*italic*",
            author="digi"
        )
        a = Answer.objects.create(
            question=q,
            description="### Small header\n**bold**\n*italic*",
            answer_author="digi"
        )
        slug = slugify(q.title)
        url = reverse('thread', args=[q.id, slug])
        cls.q = q
        cls.url = url

    def test_question_desc(self):
        resp = self.client.get(self.url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context["post"], self.q)
        # NOTE(review): desc is computed but never asserted, so this test
        # verifies nothing about the rendered markdown.  Probably a
        # self.assertContains(resp, desc) is missing — confirm intent.
        desc = markdown(self.q.description, extras=["tables", "cuddled-lists"])

    def test_answer_desc(self):
        resp = self.client.get(self.url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context["post"], self.q)
        # Render the stored description the same way the view is expected to,
        # then compare against the context's first answer node.
        node = Answer.objects.first()
        node.description = markdown(node.description, extras=["tables", "cuddled-lists"])
        self.assertEqual(resp.context["nodes"][0], node)
|
# -*- coding: utf-8 -*-
"""
Module containing the available commands of the game, the factory class to
create the commands...
Today, the available commands are:
- look,
- talk,
- move,
- enter,
- exit,
- take,
- drop,
- inventory,
- stats,
- help,
- quit,
- attack
- save
"""
import core.command
from core.commands import look, talk, move, enter, exit, take, drop, inventory,\
help, open, stats, attack, save
from core.localisation import _
import sys
"""
Code corresponding to the quit command
"""
quit = -1
class factory:
    """
    Class to instantiate a command from a string.
    """
    """
    Available commands stored in a dict with as key, the commands and as value,
    the command class to execute.
    """
    # NOTE: the keys are resolved through the localisation function _() at
    # import time, so the active locale when this module loads determines
    # which strings are accepted as commands.
    mapping = {
        _('LOOK_COMMAND'): {'allowed_while_fighting': True, 'command': 'look'},
        _('TALK_COMMAND'): {'allowed_while_fighting': False, 'command': 'talk'},
        _('MOVE_COMMAND'): {'allowed_while_fighting': True, 'command': 'move'},
        _('ENTER_COMMAND'): {'allowed_while_fighting': True, 'command': 'enter'},
        _('EXIT_COMMAND'): {'allowed_while_fighting': True, 'command': 'exit'},
        _('TAKE_COMMAND'): {'allowed_while_fighting': False, 'command': 'take'},
        _('DROP_COMMAND'): {'allowed_while_fighting': False, 'command': 'drop'},
        _('INVENTORY_COMMAND'): {'allowed_while_fighting': True, 'command': 'inventory'},
        _('INVENTORY_SHORT_COMMAND'): {'allowed_while_fighting': True, 'command': 'inventory'},
        _('STATS_COMMAND'): {'allowed_while_fighting': True, 'command': 'stats'},
        _('HELP_COMMAND'): {'allowed_while_fighting': True, 'command': 'help'},
        _('OPEN_COMMAND'): {'allowed_while_fighting': False, 'command': 'open'},
        _('SAVE_COMMAND'): {'allowed_while_fighting': False, 'command': 'save'},
        _('ATTACK_COMMAND'): {'allowed_while_fighting': True, 'command': 'attack'}
    }

    @staticmethod
    def create(p, commandFull, savedGameId=None):
        """
        command.factory.create(p, commandFull, savedGameId=None) -> command.command
        Create the desired command.
        @param p player.player Current player.
        @param commandFull list command to run, the first element of the list
        is the command, the other elements are the command's arguments.
        @return the created command, or the module-level ``quit`` sentinel
        for the quit command.
        @raise core.command.exception for unknown commands or commands not
        allowed while fighting.
        """
        # NOTE(review): this mutates the caller's list in place — after the
        # call, commandFull holds only the arguments.
        cmd = commandFull[0]
        del commandFull[0]
        if cmd in (_('QUIT_COMMAND'), _('QUIT_SHORT_COMMAND')):
            return quit
        elif cmd in factory.mapping.keys():
            cmd = factory.mapping[cmd]
            # The command module must already be imported (see the
            # core.commands import at the top of this file).
            module = sys.modules['core.commands.' + cmd['command']]
            if p.isFighting() and not cmd['allowed_while_fighting']:
                raise core.command.exception(_('ERROR_DENIED_COMMAND_WHILE_FIGHTING'))
            # The command class shares its module's name; instantiate it.
            cmd = getattr(module, cmd['command'])()
        else:
            raise core.command.exception(_('ERROR_UNKNOWN_COMMAND'))
        cmd.setArgs(commandFull)
        cmd.setPlayer(p)
        cmd.setSavedGameId(savedGameId)
        return cmd
|
import json
import constants
def save_topic_words_json(model, topics_indices, out_filename, topn=None):
    """Dump the top words of selected topics to a JSON file.

    @param model: gensim-style LDA model exposing ``get_topic_terms`` and
        ``id2word``.
    @param topics_indices: iterable of topic ids to export.
    @param out_filename: path of the JSON file to write.
    @param topn: words per topic; defaults to
        ``constants.WORDS_PER_TOPIC_JSON`` (backward compatible).
    """
    if topn is None:
        topn = constants.WORDS_PER_TOPIC_JSON
    topics = []
    for idx in topics_indices:
        topic = model.get_topic_terms(idx, topn=topn)
        # Probabilities are stringified so the output does not depend on
        # JSON float formatting.
        topics.append([{
            'word': model.id2word[id_],
            'prob': str(prob)
        } for id_, prob in topic])
    with open(out_filename, 'w') as out_file:
        json.dump(topics, out_file, indent=4)
|
from . import PluginBase
__all__ = ['Echo']
class Echo(PluginBase):
    """Trivial plugin that repeats its arguments back as one line of text."""

    def execute(self, args):
        # Join every argument token with a single space.
        separator = ' '
        return separator.join(args)

    def help(self):
        return """[text...]
echo says a line of text.
ex)
> echo hoge
hoge
"""
|
# Jogo de aventura
# Em um jogo de aventura, o dano causado por um personagem é igual à sua força total. A força total é a soma da força do personagem com os adicionais de força dos equipamentos. Um personagem é representado por um dicionário como o mostrado a seguir (ATENÇÃO: este é apenas um exemplo):
# {
# 'nome': 'Herói',
# 'força': 4,
# 'vida': 25,
# 'equipamentos': [
# {
# 'nome': 'Martelo Mortal',
# 'força': 15,
# },
# {
# 'nome': 'Luva Leve',
# 'força': 2,
# },
# ],
# }
# Neste exemplo, o dano causado pelo personagem seria 4+15+2=21. Considere outro exemplo:
# {
# 'nome': 'Outro Herói',
# 'força': 18,
# 'vida': 42,
# 'equipamentos': [],
# }
# Neste caso, o dano causado pelo personagem seria 18, pois a lista de equipamentos está vazia. Faça uma função que recebe um dicionário representando os atributos de um personagem e retorna o dano causado por ele.
# O nome da sua função deve ser calcula_dano. |
import os
import itertools
import random
from misc import logger, utils, global_vars, constants
from misc.utils import FlowSrcDst
from domain.network_premitives import GenSingleFlow, NetworkUpdate, NetworkUpdateInfo
from path_generators import PathGenerator
from ez_lib import ez_flow_tool
from ez_lib.ez_topo import Ez_Topo
from collections import defaultdict, deque
from copy import deepcopy, copy
class FlowChangeGenerator(object):
def __init__(self, path_generator=None, rng=None):
    """Set up the generator with a path generator and a private RNG.

    @param path_generator: PathGenerator used for routing; a default one is
        created when None.
    @param rng: optional random.Random.  Bug fix: the old default
        ``rng=random.Random()`` was evaluated once at import time, so every
        instance silently shared the same RNG; now each instance gets its own.
    """
    self.rng = rng if rng is not None else random.Random()
    self.log = self.init_logger()
    if path_generator is None:
        path_generator = PathGenerator()
    self.path_generator = path_generator
    self.pairs = []              # precomputed FlowSrcDst pairs (filled elsewhere)
    self.no_of_middleboxes = 0   # middleboxes requested per flow
def random_src_dst_gen(self, switch_list):
    """Infinite generator of random (src, dst) pairs with src < dst.

    Uses rejection sampling on ``self.rng``; both endpoints come from
    ``switch_list``.  Fix: ``max(switch_list)`` is now computed once
    instead of on every iteration.
    """
    highest = max(switch_list)
    while True:
        src_switch = self.rng.choice(switch_list)
        if src_switch == highest:
            # The largest switch cannot be a src: dst must be strictly greater.
            continue
        dst_switch = self.rng.choice(switch_list)
        while dst_switch <= src_switch:
            dst_switch = self.rng.choice(switch_list)
        yield src_switch, dst_switch
def get_src_dst(self):
    """No-op hook; presumably overridden by subclasses to supply a
    (src, dst) pair — TODO confirm against subclasses."""
    pass
def ordering_src_dst(self, switch_list, tm):
    """Build every (src, dst) pair with src < dst, sorted by FlowSrcDst order.

    @param switch_list: sequence of switches; pairs are formed over its
        *indices* (note: indices, not switch ids — confirm with callers).
    @param tm: traffic matrix; tm[a][b] is the volume from a to b.
    @return sorted list of FlowSrcDst(src, dst, vol, reversed_vol).
    """
    list_src_dst = []
    # range() replaces the Python-2-only xrange(); behaviour is identical.
    for src in range(len(switch_list)):
        for dst in range(src + 1, len(switch_list)):
            list_src_dst.append(FlowSrcDst(src, dst, tm[src][dst], tm[dst][src]))
    # Relies on FlowSrcDst defining its own ordering.
    list_src_dst.sort()
    return list_src_dst
@staticmethod
def init_logger():
    """Return the shared "flow_generator" logger at the configured level."""
    return logger.getLogger("flow_generator", constants.LOG_LEVEL)
@staticmethod
def generate_traffic_matrix(id_nodes):
    """Build a random traffic matrix over ``id_nodes`` via the gravity model.

    Per-node input/output volumes are drawn from Exp(1); the last node's
    volumes are chosen so total input equals total output.

    @return (tm, max_vol): ``tm[n][m]`` is the volume from n to m and
        ``max_vol`` is the largest single entry.

    NOTE(review): uses the module-level ``random`` (this is a staticmethod),
    not the instance's ``self.rng`` — seeding the instance RNG does not make
    this reproducible.  Confirm whether that is intended.
    """
    tm = {}
    t_in = {}
    t_out = {}
    total_in = 0
    total_out = 0
    # Generate the first N-1 input/output values as Exp(1) variables.
    for i in range(0, len(id_nodes) - 1):
        n = id_nodes[i]
        t_in[n] = random.expovariate(1)
        t_out[n] = random.expovariate(1)
        total_in += t_in[n]
        total_out += t_out[n]
    # Choose the last node's values so that total_in == total_out; the
    # branch keeps the derived value non-negative.
    last = id_nodes[len(id_nodes) - 1]
    if total_in > total_out:
        t_in[last] = random.expovariate(1)
        total_in += t_in[last]
        t_out[last] = total_in - total_out
        total_out += t_out[last]
    else:
        t_out[last] = random.expovariate(1)
        total_out += t_out[last]
        t_in[last] = total_out - total_in
        total_in += t_in[last]
    # Compute the traffic matrix according to the gravity model, equation (1)
    # in "Simplifying the synthesis of Internet Traffic Matrices",
    # M. Roughan, CCR 2005.
    max_vol = 0
    for n in id_nodes:
        tm[n] = {}
        for m in id_nodes:
            traffic_vol = (t_in[n] * t_out[m]) / total_in
            tm[n][m] = traffic_vol
            if max_vol < traffic_vol:
                max_vol = traffic_vol
    return tm, max_vol
@staticmethod
def normalize_by_unit(id_nodes, tm, max_vol):
    """Rescale tm in place so the peak entry is MAX_CAP / 1.2 (20% headroom)."""
    for src in id_nodes:
        row = tm[src]
        for dst in id_nodes:
            # Same expression/order as before to keep float results identical.
            row[dst] = row[dst] * float(constants.MAX_CAP) / (1.2 * max_vol)
def set_back_to_old_flow(self, new_flow, old_flow, link_caps):
    """Revert new_flow to old_flow's path/volume and re-reserve link capacity."""
    new_flow.path = old_flow.path
    new_flow.vol = old_flow.vol
    self.path_generator.allocate_link_cap(new_flow.path, link_caps, new_flow.vol, new_flow.reversed_vol)
def generate_flows(self, topo, old_tm, flow_cnt):
    """Hook for subclasses; the base implementation returns an empty update."""
    return NetworkUpdate([], [])
def compute_new_vol(self, old_vol):
    """Scale old_vol by a random factor from constants.DELTA_VOLUME,
    redrawing until the result fits under constants.MAX_CAP."""
    lower_bound, upper_bound = constants.DELTA_VOLUME
    while True:
        scaled = self.rng.uniform(lower_bound, upper_bound) * old_vol
        if scaled < constants.MAX_CAP:
            return scaled
@staticmethod
def has_statistic_info(line):
    """True if a flow-file line carries the extra statistic columns
    (more than 7 tab-separated fields)."""
    fields = line.strip("\n").split("\t")
    return len(fields) > 7
def read_statistic_info(self, flow_file):
    """Read the optional statistic header line of a flow file.

    @return a populated NetworkUpdateInfo when the first line carries
        statistics, otherwise None.
    """
    # Bug fix: the old code never closed the file handle; "with" closes it
    # even on error.
    with open(flow_file, 'r') as flow_reader:
        line = flow_reader.readline()
    if self.has_statistic_info(line):
        network_update_info = NetworkUpdateInfo()
        network_update_info.set_statistic_info_from_string(line)
        return network_update_info
    return None
def read_flows(self, flow_file, checking_deadlock=False):
    """Parse a flow file into a NetworkUpdate of old/new flows.

    File layout: an optional statistic header line, then one line per flow:
    ``(src,dst)<TAB>old_vol<TAB>[old path]<TAB>new_vol<TAB>[new path]``.

    @param checking_deadlock: when True, return an empty update if the
        old/new flow sets would deadlock.
    """
    old_flows = []
    new_flows = []
    statistic_line = None
    # "with" guarantees the handle is closed even if parsing raises.
    with open(flow_file, 'r') as flow_reader:
        line = flow_reader.readline()
        if self.has_statistic_info(line):
            statistic_line = line
            line = flow_reader.readline()
        while line:
            strs = line.strip('\n').split("\t")
            # Bug fix: advance to the next line *before* any skip.  The old
            # code "continue"d without reading, looping forever on any
            # short/blank line in the middle of the file.
            line = flow_reader.readline()
            if len(strs) <= 1:
                continue
            end_points = strs[0].strip('(').strip(')').split(',')
            src, dst = (int(end_points[0]), int(end_points[1]))
            old_flow = GenSingleFlow(len(old_flows), src, dst, float(strs[1]))
            old_flow.path = self._parse_path(strs[2])
            old_flows.append(old_flow)
            new_flow = GenSingleFlow(len(new_flows), src, dst, float(strs[3]))
            new_flow.path = self._parse_path(strs[4])
            new_flows.append(new_flow)
    if checking_deadlock and ez_flow_tool.has_deadlock(old_flows, new_flows):
        return NetworkUpdate([], [])
    update = NetworkUpdate(old_flows, new_flows)
    if statistic_line:
        update.set_statistic_info_from_string(statistic_line)
    return update

@staticmethod
def _parse_path(field):
    """Parse a "[1, 2, 3]" path field into [1, 2, 3]; "[]" gives []."""
    items = field.strip('[').strip(']').split(',')
    if items[0] == '':
        return []
    return [int(item) for item in items]
def print_flow(self, src, dst, old_vol, new_vol, old_path, new_path):
    """Format one flow line:
    "(src, dst)<TAB>old_vol<TAB>old_path<TAB>new_vol<TAB>new_path\\n"."""
    return "(%d, %d)\t%f\t%s\t%f\t%s\n" % (src, dst, old_vol, old_path,
                                           new_vol, new_path)
def add_statistic_info(self, update, old_flows, new_flows, old_link_caps, new_link_caps):
    """Populate ``update.stat_info`` with link-utilization statistics.

    A link's utilization is ``constants.MAX_CAP`` minus its remaining
    capacity; links absent from a caps dict are counted as free.
    """
    # --- old state ---
    if len(old_link_caps) == 0:
        update.stat_info.min_old_utilizing = 0
        update.stat_info.max_old_utilizing = 0
        update.stat_info.avg_old_utilizing = 0
    else:
        update.stat_info.min_old_utilizing = constants.MAX_CAP - max(old_link_caps.values())
        update.stat_info.max_old_utilizing = constants.MAX_CAP - min(old_link_caps.values())
        update.stat_info.avg_old_utilizing = constants.MAX_CAP - (
            sum(old_link_caps.values()) / len(old_link_caps.values()))
    update.stat_info.free_old_link = len(global_vars.link_capacities.values()) - len(old_link_caps.values())
    # --- new state ---
    # Bug fix: this branch used to test old_link_caps again (copy-paste),
    # so an empty *new* caps dict crashed on max()/min() below.
    if len(new_link_caps) == 0:
        update.stat_info.min_new_utilizing = 0
        update.stat_info.max_new_utilizing = 0
        update.stat_info.avg_new_utilizing = 0
    else:
        update.stat_info.min_new_utilizing = constants.MAX_CAP - max(new_link_caps.values())
        update.stat_info.max_new_utilizing = constants.MAX_CAP - min(new_link_caps.values())
        update.stat_info.avg_new_utilizing = constants.MAX_CAP - (
            sum(new_link_caps.values()) / len(new_link_caps.values()))
    update.stat_info.free_new_link = len(global_vars.link_capacities.values()) - len(new_link_caps.values())
    update.stat_info.no_of_segments_by_count = self.analyze_pivot_switches(old_flows, new_flows)
def check_update_before_writing(self, update, tm):
    """Expand every flow pair into forward + reverse flows with volumes from
    ``tm`` and verify the resulting update is deadlock-free.

    @return True when the expanded old/new flow sets have no deadlock.
    """
    old_flows = []
    new_flows = []
    # zip() replaces the Python-2-only itertools.izip(); behaviour identical.
    for old_flow, new_flow in zip(update.old_flows, update.new_flows):
        vol = tm[old_flow.src][old_flow.dst]
        reversed_vol = tm[old_flow.dst][old_flow.src]
        # Forward direction, old state.
        old_flow_1 = GenSingleFlow(len(old_flows), old_flow.src, old_flow.dst, vol, old_flow.update_type)
        old_flow_1.path = old_flow.path
        old_flows.append(old_flow_1)
        # Reverse direction, old state (reversed path and volume).
        old_flow_2 = GenSingleFlow(len(old_flows), old_flow.dst, old_flow.src, reversed_vol, old_flow.update_type)
        old_flow_2.path = list(reversed(old_flow.path))
        old_flows.append(old_flow_2)
        # Same two directions for the new state.
        new_flow_1 = GenSingleFlow(len(new_flows), new_flow.src, new_flow.dst, vol, new_flow.update_type)
        new_flow_1.path = new_flow.path
        new_flows.append(new_flow_1)
        new_flow_2 = GenSingleFlow(len(new_flows), new_flow.dst, new_flow.src, reversed_vol, new_flow.update_type)
        new_flow_2.path = list(reversed(new_flow.path))
        new_flows.append(new_flow_2)
    return not ez_flow_tool.has_deadlock(old_flows, new_flows)
def analyze_pivot_switches_for_flow(self, old_flow, new_flow, no_of_segments_by_flow_id):
    """Tally the number of segments needed for this flow's old→new transition.

    Increments the histogram ``no_of_segments_by_flow_id[segment_count]``.
    """
    # Only the segment count is used here; the operations list is discarded.
    _to_sames, segs_length = ez_flow_tool.path_to_ops_by_link(old_flow.flow_id, None, None,
                                                              old_flow, new_flow)
    # dict.get replaces the Python-2-only has_key() check-then-set.
    no_of_segments_by_flow_id[segs_length] = no_of_segments_by_flow_id.get(segs_length, 0) + 1
def print_pivot_switches_info(self, no_of_segments_by_flow_id):
    """Log the segments-per-flow histogram and return it as "\\tkey:count" text.

    Bug fixes: the old code never incremented ``count``, so the average was
    never logged; it shadowed the builtin ``sum``; and its average divided
    by the number of *distinct* segment counts (crashing on an empty
    histogram) instead of the number of flows.
    """
    total_flows = 0
    weighted_sum = 0
    str_output = ""
    for segments, flows in no_of_segments_by_flow_id.items():
        weighted_sum += segments * flows
        total_flows += flows
        str_output += "\t%d:%d" % (segments, flows)
        self.log.info("Number of flows having %d segment(s): %d" % (segments, flows))
    if total_flows > 0:
        self.log.info("Average number of segments: %f" % (weighted_sum / float(total_flows)))
    return str_output
def write_flows(self, flow_file, update, write_reversed_flow=True):
    """Write one update (statistic header plus one line per flow) to flow_file.

    @param write_reversed_flow: when True, also emit the dst→src counterpart
        of every flow with reversed path and volumes.
    """
    str_statistic = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s%s\n" % (update.stat_info.min_old_utilizing,
                                                            update.stat_info.max_old_utilizing,
                                                            update.stat_info.avg_old_utilizing,
                                                            update.stat_info.free_old_link,
                                                            update.stat_info.min_new_utilizing,
                                                            update.stat_info.max_new_utilizing,
                                                            update.stat_info.avg_new_utilizing,
                                                            update.stat_info.free_new_link,
                                                            self.print_pivot_switches_info(
                                                                update.stat_info.no_of_segments_by_count
                                                            ))
    str_flows = ""
    # zip() replaces the Python-2-only itertools.izip().
    for old_flow, new_flow in zip(update.old_flows, update.new_flows):
        self.log.debug(old_flow)
        self.log.debug(new_flow)
        str_flows += self.print_flow(old_flow.src, old_flow.dst,
                                     old_flow.vol, new_flow.vol,
                                     old_flow.path, new_flow.path)
        if write_reversed_flow:
            str_flows += self.print_flow(old_flow.dst, old_flow.src,
                                         old_flow.reversed_vol, new_flow.reversed_vol,
                                         list(reversed(old_flow.path)),
                                         list(reversed(new_flow.path)))
    # "with" closes the file even on error (the old code leaked the handle
    # on any exception between open() and close()).
    with open(flow_file, 'w') as flow_writer:
        flow_writer.write(str_statistic)
        flow_writer.write(str_flows)
def return_flows(self, old_flows, new_flows, old_link_caps, new_link_caps):
    """Pair old/new flows into a NetworkUpdate, dropping pairs whose path is
    empty in both states, and attach utilization statistics."""
    kept_old = []
    kept_new = []
    for pair in list(old_flows.keys()):
        if old_flows[pair].path == [] and new_flows[pair].path == []:
            # Never routed in either state: drop from the new-state dict too.
            new_flows.pop(pair, None)
            continue
        kept_old.append(old_flows[pair])
        kept_new.append(new_flows[pair])
    network_update = NetworkUpdate(kept_old, kept_new)
    self.add_statistic_info(network_update, network_update.old_flows,
                            network_update.new_flows, old_link_caps, new_link_caps)
    return network_update
def parse_args(self, args, log):
    """Build the topology and flow-folder path described by the CLI args.

    @param args: namespace with data_folder, topology, topology_type,
        generating_method, number_of_flows and failure_rate.
    @param log: unused here; kept for signature compatibility with callers.
    @return (topo, flow_folder)
    """
    data_directory = "../%s/%s" % (args.data_folder, args.topology)
    ez_topo = Ez_Topo()
    # Dispatch table instead of the if/elif chain; same builders, same error.
    builders = {
        constants.TOPO_ROCKETFUEL: ez_topo.create_rocketfuel_topology,
        constants.TOPO_ADJACENCY: ez_topo.create_topology_from_adjacency_matrix,
        constants.TOPO_WEIGHTED_ADJACENCY: ez_topo.create_latency_topology_from_adjacency_matrix,
    }
    if args.topology_type not in builders:
        raise Exception("What topology type")
    topo = builders[args.topology_type](data_directory)
    flow_folder = utils.get_flow_folder(data_directory, args.topology_type,
                                        args.generating_method, str(args.number_of_flows),
                                        str(args.failure_rate))
    return topo, flow_folder
def create_flows(self, args, log):
    """Generate ``args.number_of_tests`` flow files under the flow folder.

    Each iteration draws a fresh gravity-model traffic matrix, normalizes
    it, generates flows (via the subclass hook) and writes them out.
    """
    topo, flow_folder = self.parse_args(args, log)
    if not os.path.exists(flow_folder):
        os.makedirs(flow_folder)
    # range() replaces the Python-2-only xrange(); behaviour identical.
    for i in range(args.number_of_tests):
        tm, max_vol = self.generate_traffic_matrix(topo.edge_switches())
        self.normalize_by_unit(topo.edge_switches(), tm, max_vol)
        update = self.generate_flows(topo, tm, args.number_of_flows)
        flow_file = "%s/%s_%s.intra" % (flow_folder, constants.FLOW_FILE_NAME, str(i))
        self.write_flows(flow_file, update)
def get_to_try(self, topo, flow):
    """Edge switches eligible as middlebox hosts: all except the flow's endpoints."""
    endpoints = (flow.src, flow.dst)
    return [switch for switch in topo.edge_switches() if switch not in endpoints]
def generate_middleboxes(self, topo, flow):
    """Append ``self.no_of_middleboxes`` distinct middlebox switches to flow.

    Candidates are drawn without replacement from the non-endpoint edge
    switches using the instance RNG.
    """
    candidates = self.get_to_try(topo, flow)
    for _ in range(self.no_of_middleboxes):
        chosen = self.rng.choice(candidates)
        candidates.remove(chosen)
        flow.mdbxes.append(chosen)
def generate_one_state(self, topo, tm, flow_cnt):
    """Build one network state: up to ``flow_cnt`` routed flows.

    Consumes (src, dst) pairs from a copy of ``self.pairs``, skipping
    pairs that are already placed, carry no traffic, or fail the path
    generator's eligibility check; accepted flows are routed and their
    capacity reserved in ``link_caps``.

    @return (flows, link_caps): flows keyed by (src, dst), and the
        remaining per-link capacities.
    """
    src_dst_queue = deque(deepcopy(self.pairs))
    link_caps = defaultdict()
    flows = defaultdict()
    while len(src_dst_queue) > 0 and len(flows) < flow_cnt:
        # NOTE(review): the first candidate comes from the right end (pop)
        # while rejected candidates are replaced from the left (popleft) —
        # confirm this asymmetry is intentional.
        flow_src_dst = src_dst_queue.pop()
        src = flow_src_dst.lt_id
        dst = flow_src_dst.gt_id
        vol = flow_src_dst.vol
        # "(src, dst) in flows" replaces the Python-2-only has_key().
        while len(src_dst_queue) > 0 and ((src, dst) in flows or tm[src][dst] == 0
                                          or not self.path_generator.check_eligible_src_dst(topo, link_caps,
                                                                                            src, dst,
                                                                                            tm[src][dst],
                                                                                            tm[dst][src])):
            flow_src_dst = src_dst_queue.popleft()
            src = flow_src_dst.lt_id
            dst = flow_src_dst.gt_id
            vol = flow_src_dst.vol
        flow = GenSingleFlow(len(flows), src, dst, vol, update_type=constants.ADDING_FLOW,
                             reversed_vol=flow_src_dst.reversed_vol)
        self.path_generator.attempts = len(topo.edge_switches())
        flow.path = self.path_generator.generate_path(topo, flow, link_caps)
        if not flow.path:
            # No feasible path: drop this candidate and keep going.
            continue
        flows[(src, dst)] = flow
    return flows, link_caps
    def generate_one_state_from_old(self, topo, tm, flow_cnt, old_flows):
        """Derive the next network state from *old_flows*.

        Every existing flow is re-routed with a perturbed volume; flows that
        lose their path are flagged for removal, and fresh flows are
        back-filled so the state keeps roughly the same number of routed
        flows.  Returns ``(flows, link_caps)`` for the new state.

        Side effect: *old_flows* is mutated in place -- unroutable entries
        are dropped and back-fill counterparts are added.
        """
        src_dst_queue = deque(deepcopy(self.pairs))
        link_caps = defaultdict()
        flows = defaultdict()
        empty_path_count = 0
        # NOTE(review): this loop pops from old_flows while iterating
        # .keys(); that is only safe where .keys() returns a copy
        # (Python 2).  Under Python 3 it would raise RuntimeError -- confirm
        # the target interpreter.
        for (src, dst) in old_flows.keys():
            flow = deepcopy(old_flows[(src, dst)])
            flow.vol = self.compute_new_vol(flow.vol)
            flow.reversed_vol = self.compute_new_vol(flow.reversed_vol)
            # This pair is already handled; keep it out of the back-fill queue.
            src_dst_queue.remove(FlowSrcDst(src, dst, tm[src][dst], tm[dst][src]))
            is_old_no_path = (flow.path == [])
            flow.path = []
            self.path_generator.attempts = len(topo.edge_switches())
            flow.path = self.path_generator.generate_path(topo, flow, link_caps)
            if flow.path or (not flow.path and not is_old_no_path):
                # Routed again, or just lost its previous path; the latter is
                # kept but marked as a removal update.
                if not flow.path and not is_old_no_path:
                    flow.update_type = constants.REMOVING_FLOW
                flows[(src, dst)] = flow
            elif not flow.path and is_old_no_path:
                # Never had a path and still has none: forget it entirely.
                old_flows.pop((src, dst))
            if not flow.path:
                empty_path_count += 1
        # Back-fill one fresh flow for every flow that ended up unrouted.
        while len(src_dst_queue) > 0 and empty_path_count > 0:
            flow_src_dst = src_dst_queue.popleft()
            src = flow_src_dst.lt_id
            dst = flow_src_dst.gt_id
            vol = flow_src_dst.vol
            flow = GenSingleFlow(len(flows), src, dst, vol,
                                 update_type = constants.ADDING_FLOW,
                                 reversed_vol= flow_src_dst.reversed_vol)
            self.generate_middleboxes(topo, flow)
            self.path_generator.attempts = len(topo.edge_switches())
            flow.path = self.path_generator.generate_path(topo, flow, link_caps)
            if flow.path:
                # Record an empty "old" counterpart so the next diff reports
                # this flow as an addition.
                old_flow = GenSingleFlow(len(flows), src, dst, vol, reversed_vol=flow_src_dst.reversed_vol)
                flows[(src, dst)] = flow
                old_flows[(src, dst)] = old_flow
                empty_path_count -= 1
        return flows, link_caps
@staticmethod
def generate_empty_state(flows):
empty_flows = defaultdict()
link_caps = {}
for (src, dst) in flows.keys():
flow = deepcopy(flows[(src, dst)])
flow.path = []
empty_flows[(src, dst)] = flow
for o_pair in global_vars.link_capacities.keys():
link_caps[o_pair] = global_vars.link_capacities[o_pair]
return empty_flows, link_caps
    def create_continuously_series_of_flows(self, args, log):
        """Generate a chain of flow files, each state derived from the last.

        State 0 is diffed against an empty state; every following state
        perturbs the previous one.  Each transition is written to
        ``<flow_folder>/<FLOW_FILE_NAME>_<i>.intra``.
        """
        topo, flow_folder = self.parse_args(args, log)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
        tm, max_vol = self.generate_traffic_matrix(topo.edge_switches())
        self.normalize_by_unit(topo.edge_switches(), tm, max_vol)
        self.pairs = self.ordering_src_dst(topo.edge_switches(), tm)
        self.log.info(tm)
        new_dict_flows, new_link_caps = self.generate_one_state(topo, tm, args.number_of_flows)
        # First update: transition from "no flows" to state 0.
        old_dict_flows, old_link_caps = self.generate_empty_state(new_dict_flows)
        flow_file = "%s/%s_0.intra" % (flow_folder, constants.FLOW_FILE_NAME)
        network_update = self.return_flows(old_dict_flows, new_dict_flows, old_link_caps, new_link_caps)
        self.write_flows(flow_file, network_update)
        number_of_updates = 1
        while number_of_updates < args.number_of_tests:
            # Snapshot the current state before deriving the next one from it
            # (generate_one_state_from_old mutates its old_flows argument).
            old_dict_flows = deepcopy(new_dict_flows)
            old_link_caps = deepcopy(new_link_caps)
            dict_flows, link_caps = \
                self.generate_one_state_from_old(topo, tm, args.number_of_flows, old_dict_flows)
            network_update = self.return_flows(old_dict_flows, dict_flows, old_link_caps, link_caps)
            # if self.check_update_before_writing(network_update, tm):
            self.log.info("number of update: %d" % number_of_updates)
            flow_file = "%s/%s_%s.intra" % (flow_folder, constants.FLOW_FILE_NAME, str(number_of_updates))
            # network_update = self.return_flows(old_dict_flows, new_dict_flows, old_link_caps, new_link_caps)
            self.write_flows(flow_file, network_update)
            number_of_updates += 1
            new_dict_flows = dict_flows
            new_link_caps = link_caps
def write_flows_pair(self, flow_file, old_flows, new_flows):
flow_writer = open(flow_file, 'w')
flow_writer.write('something\n')
str_flows = ""
for old_flow, new_flow in itertools.izip(old_flows, new_flows):
self.log.debug(old_flow)
self.log.debug(new_flow)
if old_flow.path != [] or new_flow.path != []:
old_vol = old_flow.vol
new_vol = new_flow.vol
str_flows += self.print_flow(old_flow.src, old_flow.dst,
old_vol, new_vol,
old_flow.path, new_flow.path)
flow_writer.write(str_flows)
flow_writer.close()
def analyze_pivot_switches(self, old_flows, new_flows):
no_of_segments_by_count = {}
for old_flow, new_flow in itertools.izip(old_flows, new_flows):
self.analyze_pivot_switches_for_flow(old_flow, new_flow, no_of_segments_by_count)
return no_of_segments_by_count
# self.print_pivot_switches_info(no_of_segments_by_count) |
import unittest
import os
from programy.parser.pattern.factory import PatternNodeFactory
from programy.parser.pattern.nodes.root import PatternRootNode
from programy.parser.pattern.nodes.word import PatternWordNode
class PatternFactoryTests(unittest.TestCase):
    """Tests for PatternNodeFactory configuration loading."""

    def test_init(self):
        # A fresh factory starts with no node classes configured.
        factory = PatternNodeFactory()
        self.assertIsNotNone(factory)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual throughout.
        self.assertEqual({}, factory._nodes_config)
        self.assertEqual("Pattern", factory._type)

    def assert_nodes(self, factory):
        """Shared assertions: the factory holds the full set of 12 node types
        and instantiates the expected classes for 'root' and 'word'."""
        self.assertEqual(12, len(factory._nodes_config))
        self.assertTrue("root" in factory._nodes_config)
        instance = factory._nodes_config["root"]
        root = instance()
        self.assertIsInstance(root, PatternRootNode)
        self.assertTrue("word" in factory._nodes_config)
        instance = factory._nodes_config["word"]
        word = instance("test")
        self.assertIsInstance(word, PatternWordNode)

    def test_load_nodes_config_from_file(self):
        factory = PatternNodeFactory()
        # os.path.join is clearer and more portable than manual
        # dirname + os.sep concatenation.
        factory.load_nodes_config_from_file(os.path.join(os.path.dirname(__file__), "pattern_nodes.conf"))
        self.assert_nodes(factory)

    def test_load_nodes_config_from_file_invalid_filename(self):
        # A missing/unreadable file must leave the default node set intact.
        factory = PatternNodeFactory()
        factory.load_nodes_config_from_file("some_rubbish.txt")
        self.assert_nodes(factory)
|
# Generated by Django 2.0.2 on 2018-04-04 14:06
from django.db import migrations
from voter.models import BadLineTracker
def forward_0018(apps, schema):
    """
    Given the BadLine objects in the DB, create BadLineRange
    objects, collapsing runs of errors into single objects.
    This *loses information* - we don't store the actual bad
    line for every original BadLine object, only the first in
    each run.
    """
    BadLine = apps.get_model('voter.BadLine')
    BadLineRange = apps.get_model('voter.BadLineRange')
    # Start from a clean slate so re-running the migration cannot
    # duplicate ranges.
    BadLineRange.objects.all().delete()
    tracker = None
    # Use .iterator() because there could be millions of these records and we can't
    # hold them all in memory.
    for line in BadLine.objects.order_by('filename', 'is_warning', 'line_no', 'message').iterator():
        # A new filename ends the current run; flush before switching trackers.
        if tracker and tracker.filename != line.filename:
            tracker.flush()
            tracker = None
        if not tracker:
            tracker = BadLineTracker(line.filename, BadLineRange)
        tracker.add(
            line.line_no,
            line.line,
            line.message,
            line.is_warning
        )
    # Flush the trailing run, if any.
    if tracker:
        tracker.flush()
def backward_0018(apps, schema):
"Reverse of forward_00018"
BadLine = apps.get_model('voter.BadLine')
BadLineRange = apps.get_model('voter.BadLineRange')
BadLine.objects.all().delete()
# Not sure how many BadLineRange objects there might be, but
# adding .iterator() can't hurt.
for blr in BadLineRange.objects.all().iterator():
i = 0
for line_no in range(start=blr.first_line_no, stop=blr.last_line_no + 1):
BadLine.objects.create(
filename=blr.filename,
message=blr.message,
line=blr.example_line if i == 0 else '',
is_warning=blr.is_warning,
line_no=line_no,
)
i += 1
class Migration(migrations.Migration):
    # Requires the migration that created the BadLineRange model.
    dependencies = [
        ('voter', '0017_badlinerange'),
    ]

    operations = [
        # Collapse BadLine rows into BadLineRange runs (and expand them
        # back on reverse).
        migrations.RunPython(
            forward_0018,
            backward_0018,
        )
    ]
|
#!/usr/bin/env python3
import typing
import snips_nlu.dataset as DS
from snips_nlu.common.utils import unicode_string
import io
import yaml
import random
from .word_dict import random_dict
import copy
class Dataset(DS.Dataset):
    """A snips-nlu Dataset that synthesises one intent per entity.

    For every entity found in a YAML dataset an ``ENTITY<name>`` intent is
    generated whose utterances embed the entity's values surrounded by
    random filler words, so the engine can learn to spot the entity in
    arbitrary contexts.
    """

    # Template for a synthetic per-entity intent; deep-copied per entity.
    slot_intent_template = {
        "type": "intent",
        "name": "",  # ENTITY{entity_name}
        "slots": [
            {
                "name": "",  # {entity_name}
                "entity": ""  # {entity_name}
            }
        ],
        "utterances": []  # "{random words} {entity_value} {random words}"
    }

    def __init__(self, intents=None, entites=None):
        # BUG FIX: the previous mutable default arguments ([]) were shared
        # across every call of __init__; use None sentinels instead.
        # (Parameter name "entites" kept as-is for caller compatibility.)
        super(Dataset, self).__init__("en",
                                      intents if intents is not None else [],
                                      entites if entites is not None else [])

    def from_yaml(self, filename):
        """Load *filename* and return a new Dataset that contains both the
        original intents and one synthetic intent per entity.

        The synthetic YAML is kept on the returned instance as
        ``entity_intent_yaml`` alongside the raw input ``yaml``.
        """
        with io.open(filename) as f:
            self.yaml = f.read()
        ds = DS.Dataset.from_yaml_files("en", [io.StringIO(self.yaml)])
        self.entity_intents = []
        for entity in ds.entities:
            entity_intent = copy.deepcopy(self.__class__.slot_intent_template)
            entity_intent["name"] = f"ENTITY{entity.name}"
            entity_intent["slots"][0]["name"] = entity.name
            entity_intent["slots"][0]["entity"] = entity.name
            # Three utterance shapes per value: filler on both sides,
            # filler before only, and filler after only.
            for utterance in entity.utterances:
                pre = " ".join(random.choices(random_dict, k=random.randint(1, 5)))
                post = " ".join(random.choices(random_dict, k=random.randint(1, 5)))
                entity_intent["utterances"].append(f"{pre} [{entity.name}]({utterance.value}) {post}")
            for utterance in entity.utterances:
                pre = " ".join(random.choices(random_dict, k=random.randint(1, 5)))
                entity_intent["utterances"].append(f"{pre} [{entity.name}]({utterance.value})")
            for utterance in entity.utterances:
                post = " ".join(random.choices(random_dict, k=random.randint(1, 5)))
                entity_intent["utterances"].append(f"[{entity.name}]({utterance.value}) {post}")
            # Entities without values produce no trainable utterances.
            if not entity_intent["utterances"]:
                continue
            self.entity_intents.append(entity_intent)
        self.entity_intent_yaml: typing.AnyStr = yaml.dump_all(self.entity_intents)
        ds = DS.Dataset.from_yaml_files("en", [io.StringIO(self.yaml), io.StringIO(self.entity_intent_yaml)])
        ret = Dataset(ds.intents, ds.entities)
        ret.yaml = self.yaml
        ret.entity_intent_yaml = self.entity_intent_yaml
        ret.entity_intents = self.entity_intents
        return ret
|
from app.extensions import api
from flask_restplus import fields
# Serialization model for a Notice-of-Work client (applicant or submitter):
# either an organisation or an individual, with phone/fax/email contact
# details and a mailing address.
CLIENT = api.model(
    'Client', {
        'clientid': fields.Integer,
        'type': fields.String,
        'org_legalname': fields.String,
        'org_doingbusinessas': fields.String,
        'ind_firstname': fields.String,
        'ind_lastname': fields.String,
        'ind_middlename': fields.String,
        'ind_phonenumber': fields.String,
        'dayphonenumber': fields.String,
        'dayphonenumberext': fields.String,
        'faxnumber': fields.String,
        'email': fields.String,
        'org_bcfedincorpnumber': fields.String,
        'org_bcregnumber': fields.String,
        'org_societynumber': fields.String,
        'org_hstregnumber': fields.String,
        'org_contactname': fields.String,
        'mailingaddressline1': fields.String,
        'mailingaddressline2': fields.String,
        'mailingaddresscity': fields.String,
        'mailingaddressprovstate': fields.String,
        'mailingaddresscountry': fields.String,
        'mailingaddresspostalzip': fields.String,
    })

# Same shape as CLIENT plus contact role / certification fields and an
# ordering sequence number.
CONTACT = api.model(
    'CONTACT', {
        'type': fields.String,
        'org_legalname': fields.String,
        'org_doingbusinessas': fields.String,
        'ind_firstname': fields.String,
        'ind_lastname': fields.String,
        'ind_middlename': fields.String,
        'ind_phonenumber': fields.String,
        'dayphonenumber': fields.String,
        'dayphonenumberext': fields.String,
        'faxnumber': fields.String,
        'email': fields.String,
        'org_bcfedincorpnumber': fields.String,
        'org_bcregnumber': fields.String,
        'org_societynumber': fields.String,
        'org_hstregnumber': fields.String,
        'org_contactname': fields.String,
        'mailingaddressline1': fields.String,
        'contacttype': fields.String,
        'contactcertificationtype': fields.String,
        'contactcertificationid': fields.String,
        'mailingaddressline2': fields.String,
        'mailingaddresscity': fields.String,
        'mailingaddressprovstate': fields.String,
        'mailingaddresscountry': fields.String,
        'mailingaddresspostalzip': fields.String,
        'seq_no': fields.Integer,
    })

# Attachment metadata for an application message, including flags for the
# various document packages it may belong to.
DOCUMENT = api.model(
    'DOCUMENT', {
        'id': fields.Integer,
        'messageid': fields.Integer,
        'documenturl': fields.String,
        'filename': fields.String,
        'documenttype': fields.String,
        'description': fields.String,
        'document_manager_document_guid': fields.String,
        'is_final_package': fields.Boolean,
        'final_package_order': fields.Integer,
        'is_referral_package': fields.Boolean,
        'is_consultation_package': fields.Boolean
    })
# Per-activity serialization models for the Notice-of-Work payload.
# fields.Arbitrary carries arbitrary-precision decimal quantities
# (areas, volumes, rates); plain dimensions use fields.Integer.
PLACER_ACTIVITY = api.model(
    'PLACER_ACTIVITY', {
        'placeractivityid': fields.Integer,
        'type': fields.String,
        'quantity': fields.Integer,
        'depth': fields.Integer,
        'length': fields.Integer,
        'width': fields.Integer,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
    })

SETTLING_POND = api.model(
    'SETTLING_POND', {
        'settlingpondid': fields.String,
        'pondid': fields.String,
        'watersource': fields.String,
        'width': fields.Integer,
        'length': fields.Integer,
        'depth': fields.Integer,
        'constructionmethod': fields.String,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
    })

SURFACE_BULK_SAMPLE_ACTIVITY = api.model(
    'SURFACE_BULK_SAMPLE_ACTIVITY', {
        'type': fields.String,
        'quantity': fields.Integer,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
        'length': fields.Integer,
        'width': fields.Integer,
    })

SAND_GRAVEL_QUARRY_ACTIVITY = api.model('SAND_GRAVEL_QUARRY_ACTIVITY', {
    'type': fields.String,
    'disturbedarea': fields.Arbitrary,
    'timbervolume': fields.Arbitrary,
    'length': fields.Integer,
    'width': fields.Integer,
})

UNDER_EXP_NEW_ACTIVITY = api.model(
    'UNDER_EXP_NEW_ACTIVITY', {
        'type': fields.String,
        'incline': fields.Arbitrary,
        'inclineunits': fields.String,
        'quantity': fields.Integer,
        'length': fields.Arbitrary,
        'width': fields.Arbitrary,
        'height': fields.Arbitrary,
        'seq_no': fields.Integer,
    })

UNDER_EXP_REHAB_ACTIVITY = api.model(
    'UNDER_EXP_REHAB_ACTIVITY', {
        'type': fields.String,
        'incline': fields.Arbitrary,
        'inclineunits': fields.String,
        'quantity': fields.Integer,
        'length': fields.Arbitrary,
        'width': fields.Arbitrary,
        'height': fields.Arbitrary,
        'seq_no': fields.Integer,
    })

UNDER_EXP_SURFACE_ACTIVITY = api.model(
    'UNDER_EXP_SURFACE_ACTIVITY', {
        'type': fields.String,
        'quantity': fields.Integer,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
    })

EXP_ACCESS_ACTIVITY = api.model(
    'EXP_ACCESS_ACTIVITY', {
        'type': fields.String,
        'length': fields.Arbitrary,
        'lengthinmeters': fields.Integer,
        'width': fields.Integer,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
        'numberofsites': fields.Arbitrary,
    })

EXP_SURFACE_DRILL_ACTIVITY = api.model(
    'EXP_SURFACE_DRILL_ACTIVITY', {
        'type': fields.String,
        'numberofsites': fields.Arbitrary,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
        'length': fields.Integer,
        'width': fields.Integer,
    })

MECH_TRENCHING_ACTIVITY = api.model(
    'MECH_TRENCHING_ACTIVITY', {
        'type': fields.String,
        'numberofsites': fields.Integer,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
        'length': fields.Arbitrary,
        'width': fields.Arbitrary,
        'depth': fields.Arbitrary,
    })

WATER_SOURCE_ACTIVITY = api.model(
    'WATER_SOURCE_ACTIVITY', {
        'sourcewatersupply': fields.String,
        'type': fields.String,
        'useofwater': fields.String,
        'estimateratewater': fields.Arbitrary,
        'pumpsizeinwater': fields.Arbitrary,
        'locationwaterintake': fields.String,
        'seq_no': fields.Integer,
    })

EQUIPMENT = api.model('EQUIPMENT', {
    "type": fields.String,
    "size": fields.String,
    "quantity": fields.Integer
})

CAMP_ACTIVITY = api.model(
    'CAMP_ACTIVITY', {
        'name': fields.String,
        'peopleincamp': fields.Arbitrary,
        'numberofstructures': fields.Arbitrary,
        'wastedisposal': fields.String,
        'sanitaryfacilities': fields.String,
        'watersupply': fields.String,
        'quantityofwater': fields.Integer,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
        'length': fields.Integer,
        'width': fields.Integer,
    })

BUILDING_ACTIVITY = api.model(
    'BUILDING_ACTIVITY', {
        'name': fields.String,
        'purpose': fields.String,
        'structure': fields.String,
        'disturbedarea': fields.Arbitrary,
        'timbervolume': fields.Arbitrary,
        'length': fields.Integer,
        'width': fields.Integer,
    })

STAGING_AREA_ACTIVITY = api.model('STAGING_AREA_ACTIVITY', {
    'name': fields.String,
    'disturbedarea': fields.Arbitrary,
    'timbervolume': fields.Arbitrary,
    'length': fields.Integer,
    'width': fields.Integer,
})
# Full serialization model for a Notice-of-Work application, including
# nested client/contact/document/activity sub-models.
# BUG FIX: the key 'hassurfacedisturbanceoutsidetenure' appeared twice in
# this dict literal; Python silently keeps only the last occurrence, so the
# duplicate has been removed.
APPLICATION = api.model(
    'Application', {
        'messageid': fields.Integer,
        'application_guid': fields.String,
        'now_application_guid': fields.String,
        'now_number': fields.String,
        'originating_system': fields.String,
        'mine_guid': fields.String,
        'mine_name': fields.String,
        'mine_region': fields.String,
        'trackingnumber': fields.Integer,
        'applicationtype': fields.String,
        'status': fields.String,
        'submitteddate': fields.DateTime,
        'receiveddate': fields.DateTime,
        'applicantclientid': fields.Integer,
        'submitterclientid': fields.Integer,
        'noticeofworktype': fields.String,
        'typeofpermit': fields.String,
        'typeofapplication': fields.String,
        'minenumber': fields.String,
        'minepurpose': fields.String,
        'latitude': fields.Fixed(decimals=7),
        'longitude': fields.Fixed(decimals=7),
        'nameofproperty': fields.String,
        'tenurenumbers': fields.String,
        'crowngrantlotnumbers': fields.String,
        'sitedirections': fields.String,
        'firstaidequipmentonsite': fields.String,
        'firstaidcertlevel': fields.String,
        'descexplorationprogram': fields.String,
        'describeexplosivetosite': fields.String,
        'proposedstartdate': fields.DateTime,
        'proposedenddate': fields.DateTime,
        'proposedstartmonth': fields.String,
        'proposedstartday': fields.String,
        'proposedendmonth': fields.String,
        'proposedendday': fields.String,
        'yearroundseasonal': fields.String,
        'landcommunitywatershed': fields.String,
        'landprivate': fields.String,
        'landlegaldesc': fields.String,
        'archsitesaffected': fields.String,
        'sandgravelquarryoperations': fields.String,
        'storeexplosivesonsite': fields.String,
        'bcexplosivespermitissued': fields.String,
        'bcexplosivespermitnumber': fields.String,
        'bcexplosivespermitexpiry': fields.DateTime,
        'campdisturbedarea': fields.Arbitrary,
        'camptimbervolume': fields.Arbitrary,
        'bldgdisturbedarea': fields.Arbitrary,
        'bldgtimbervolume': fields.Arbitrary,
        'stgedisturbedarea': fields.Arbitrary,
        'stgetimbervolume': fields.Arbitrary,
        'fuellubstoreonsite': fields.String,
        'fuellubstored': fields.Integer,
        'fuellubstoremethodbulk': fields.String,
        'fuellubstoremethodbarrel': fields.String,
        'cbsfreclamation': fields.String,
        'cbsfreclamationcost': fields.Arbitrary,
        'mechtrenchingreclamation': fields.String,
        'mechtrenchingreclamationcost': fields.Arbitrary,
        'expsurfacedrillreclamation': fields.String,
        'expsurfacedrillreclcorestorage': fields.String,
        'expsurfacedrillprogam': fields.String,
        'expsurfacedrillreclamationcost': fields.Arbitrary,
        'expaccessreclamation': fields.String,
        'expaccessreclamationcost': fields.Arbitrary,
        'surfacebulksampleprocmethods': fields.String,
        'surfacebulksamplereclamation': fields.String,
        'surfacebulksamplereclsephandl': fields.String,
        'surfacebulksamplerecldrainmiti': fields.String,
        'surfacebulksamplereclcost': fields.Arbitrary,
        'underexptotalore': fields.Integer,
        'underexptotaloreunits': fields.String,
        'underexptotalwaste': fields.Integer,
        'underexptotalwasteunits': fields.String,
        'underexpreclamation': fields.String,
        'underexpreclamationcost': fields.Arbitrary,
        'placerundergroundoperations': fields.String,
        'placerhandoperations': fields.String,
        'placerreclamationarea': fields.Arbitrary,
        'placerreclamation': fields.String,
        'placerreclamationcost': fields.Arbitrary,
        'sandgrvqrydepthoverburden': fields.Arbitrary,
        'sandgrvqrydepthtopsoil': fields.Arbitrary,
        'sandgrvqrystabilizemeasures': fields.String,
        'sandgrvqrywithinaglandres': fields.String,
        'sandgrvqryalrpermitnumber': fields.String,
        'sandgrvqrylocalgovsoilrembylaw': fields.String,
        'sandgrvqryofficialcommplan': fields.String,
        'sandgrvqrylandusezoning': fields.String,
        'sandgrvqryendlanduse': fields.String,
        'sandgrvqrytotalmineres': fields.Integer,
        'sandgrvqrytotalmineresunits': fields.String,
        'sandgrvqryannualextrest': fields.Integer,
        'sandgrvqryannualextrestunits': fields.String,
        'sandgrvqryreclamation': fields.String,
        'sandgrvqryreclamationbackfill': fields.String,
        'sandgrvqryreclamationcost': fields.Arbitrary,
        'sandgrvqrygrdwtravgdepth': fields.Arbitrary,
        'sandgrvqrygrdwtrexistingareas': fields.String,
        'sandgrvqrygrdwtrtestpits': fields.String,
        'sandgrvqrygrdwtrtestwells': fields.String,
        'sandgrvqrygrdwtrother': fields.String,
        'sandgrvqrygrdwtrmeasprotect': fields.String,
        'sandgrvqryimpactdistres': fields.Integer,
        'sandgrvqryimpactdistwater': fields.Integer,
        'sandgrvqryimpactnoise': fields.String,
        'sandgrvqryimpactprvtaccess': fields.String,
        'sandgrvqryimpactprevtdust': fields.String,
        'sandgrvqryimpactminvisual': fields.String,
        'cutlinesexplgridtotallinekms': fields.Integer,
        'cutlinesexplgridtimbervolume': fields.Arbitrary,
        'cutlinesreclamation': fields.String,
        'cutlinesreclamationcost': fields.Arbitrary,
        'pondswastewatertreatfacility': fields.String,
        'freeusepermit': fields.String,
        'licencetocut': fields.String,
        'timbertotalvolume': fields.Arbitrary,
        'campbuildstgetotaldistarea': fields.Arbitrary,
        'mechtrenchingtotaldistarea': fields.Arbitrary,
        'expsurfacedrilltotaldistarea': fields.Arbitrary,
        'expaccesstotaldistarea': fields.Arbitrary,
        'surfacebulksampletotaldistarea': fields.Arbitrary,
        'placertotaldistarea': fields.Arbitrary,
        'underexptotaldistarea': fields.Arbitrary,
        'sandgrvqrytotaldistarea': fields.Arbitrary,
        'pondstotaldistarea': fields.Arbitrary,
        'reclcostsubtotal': fields.Arbitrary,
        'reclcostexist': fields.Arbitrary,
        'reclcostrecl': fields.Arbitrary,
        'reclcosttotal': fields.Arbitrary,
        'reclareasubtotal': fields.Arbitrary,
        'reclareaexist': fields.Arbitrary,
        'reclarearecl': fields.Arbitrary,
        'reclareatotal': fields.Arbitrary,
        'anyotherinformation': fields.String,
        'vfcbcapplicationurl': fields.String,
        'messagecreateddate': fields.DateTime,
        'processed': fields.String,
        'processeddate': fields.DateTime,
        'cutlinesexplgriddisturbedarea': fields.Arbitrary,
        'pondsrecycled': fields.String,
        'pondsexfiltratedtoground': fields.String,
        'pondsdischargedtoenv': fields.String,
        'pondsreclamation': fields.String,
        'pondsreclamationcost': fields.Arbitrary,
        'sandgrvqrytotalexistdistarea': fields.Arbitrary,
        'nrsosapplicationid': fields.String,
        'isblastselect': fields.String,
        'istimberselect': fields.String,
        'applicantindividualorcompany': fields.String,
        'applicantrelationship': fields.String,
        'termofapplication': fields.Arbitrary,
        'hasaccessauthorizations': fields.String,
        'accessauthorizationsdetails': fields.String,
        'accessauthorizationskeyprovided': fields.String,
        'landpresentcondition': fields.String,
        'currentmeansofaccess': fields.String,
        'physiography': fields.String,
        'oldequipment': fields.String,
        'typeofvegetation': fields.String,
        'recreationuse': fields.String,
        'isparkactivities': fields.String,
        'hasltgovauthorization': fields.String,
        'hasarchaeologicalprotectionplan': fields.String,
        'isonprivateland': fields.String,
        'hasengagedfirstnations': fields.String,
        'hasculturalheritageresources': fields.String,
        'archaeologicalprotectionplan': fields.String,
        'firstnationsactivities': fields.String,
        'curturalheritageresources': fields.String,
        'hasproposedcrossings': fields.String,
        'proposedcrossingschanges': fields.String,
        'cleanoutdisposalplan': fields.String,
        'maxannualtonnage': fields.Arbitrary,
        'maxannualcubicmeters': fields.Arbitrary,
        'proposedproduction': fields.Arbitrary,
        'isaccessgated': fields.String,
        'permitnumber': fields.String,
        'atsauthorizationnumber': fields.Arbitrary,
        'atsprojectnumber': fields.Arbitrary,
        'filenumberofappl': fields.String,
        'originalstartdate': fields.DateTime,
        'annualsummarysubmitted': fields.String,
        'firstyearofmulti': fields.String,
        'authorizationdetail': fields.String,
        'oncrownland': fields.String,
        'havelicenceofoccupation': fields.String,
        'appliedforlicenceofoccupation': fields.String,
        'licenceofoccupation': fields.String,
        'noticeservedtoprivate': fields.String,
        'sandgrvqryprogressivereclam': fields.String,
        'sandgrvqrymaxunreclaimed': fields.Arbitrary,
        'pondtypeofsediment': fields.String,
        'pondtypeconstruction': fields.String,
        'pondarea': fields.String,
        'pondspillwaydesign': fields.String,
        'camphealthauthority': fields.String,
        'camphealthconsent': fields.String,
        'proposedproductionunit': fields.String,
        'placerstreamdiversion': fields.String,
        'sandgrvqrydescription': fields.String,
        'hassurfacedisturbanceoutsidetenure': fields.String,
        'bedrockexcavation': fields.String,
        'proposedactivites': fields.String,
        'applicant': fields.Nested(CLIENT),
        'submitter': fields.Nested(CLIENT),
        'documents': fields.List(fields.Nested(DOCUMENT)),
        'contacts': fields.List(fields.Nested(CONTACT)),
        'existing_placer_activity': fields.Nested(PLACER_ACTIVITY),
        'existing_settling_pond': fields.Nested(SETTLING_POND),
        'exp_access_activity': fields.List(fields.Nested(EXP_ACCESS_ACTIVITY)),
        'exp_surface_drill_activity': fields.List(fields.Nested(EXP_SURFACE_DRILL_ACTIVITY)),
        'proposed_placer_activity': fields.Nested(PLACER_ACTIVITY),
        'proposed_settling_pond': fields.Nested(SETTLING_POND),
        'surface_bulk_sample_activity': fields.List(fields.Nested(SURFACE_BULK_SAMPLE_ACTIVITY)),
        'sand_grv_qry_activity': fields.List(fields.Nested(SAND_GRAVEL_QUARRY_ACTIVITY)),
        'under_exp_new_activity': fields.List(fields.Nested(UNDER_EXP_NEW_ACTIVITY)),
        'under_exp_rehab_activity': fields.List(fields.Nested(UNDER_EXP_REHAB_ACTIVITY)),
        'under_exp_surface_activity': fields.List(fields.Nested(UNDER_EXP_SURFACE_ACTIVITY)),
        'water_source_activity': fields.List(fields.Nested(WATER_SOURCE_ACTIVITY)),
        'mech_trenching_activity': fields.List(fields.Nested(MECH_TRENCHING_ACTIVITY)),
        'camps': fields.List(fields.Nested(CAMP_ACTIVITY)),
        'buildings': fields.List(fields.Nested(BUILDING_ACTIVITY)),
        'stagingareas': fields.List(fields.Nested(STAGING_AREA_ACTIVITY)),
        'equipment': fields.List(fields.Nested(EQUIPMENT))
    })
# Summary row used by list endpoints (a slim projection of APPLICATION).
APPLICATION_LIST = api.model(
    'ApplicationList', {
        'application_guid': fields.String,
        'now_application_guid': fields.String,
        'mine_guid': fields.String,
        'mine_name': fields.String,
        'mine_region': fields.String,
        'minenumber': fields.String,
        'noticeofworktype': fields.String,
        'trackingnumber': fields.Integer,
        'status': fields.String,
        'receiveddate': fields.Date,
    })

# Generic pagination envelope; concrete list models inherit from it.
PAGINATED_LIST = api.model(
    'List', {
        'current_page': fields.Integer,
        'total_pages': fields.Integer,
        'items_per_page': fields.Integer,
        'total': fields.Integer,
    })

PAGINATED_APPLICATION_LIST = api.inherit('PaginatedApplicationList', PAGINATED_LIST, {
    'records': fields.List(fields.Nested(APPLICATION_LIST)),
})

# Notice of Deemed Authorization application (a simpler sibling of
# APPLICATION with its own activity description fields).
APPLICATIONNDA = api.model(
    'ApplicationNDA', {
        'messageid': fields.Integer,
        'application_nda_guid': fields.String,
        'originating_system': fields.String,
        'mine_guid': fields.String,
        'trackingnumber': fields.Integer,
        'applicationtype': fields.String,
        'status': fields.String,
        'submitteddate': fields.DateTime,
        'receiveddate': fields.DateTime,
        'typedeemedauthorization': fields.String,
        'permitnumber': fields.String,
        'minenumber': fields.String,
        'nownumber': fields.String,
        'planactivitiesdrillprogram': fields.String,
        'planactivitiesipsurvey': fields.String,
        'proposedstartdate': fields.DateTime,
        'proposedenddate': fields.DateTime,
        'totallinekilometers': fields.Integer,
        'descplannedactivities': fields.String,
        'proposednewenddate': fields.DateTime,
        'reasonforextension': fields.String,
        'anyotherinformation': fields.String,
        'vfcbcapplicationurl': fields.String,
        'messagecreateddate': fields.DateTime,
        'processed': fields.String,
        'processeddate': fields.DateTime,
        'nrsosapplicationid': fields.String,
        'applicant': fields.Nested(CLIENT),
        'submitter': fields.Nested(CLIENT),
        'documents': fields.List(fields.Nested(DOCUMENT))
    })

# Start/stop-work notification payload; note the camelCase keys mirror the
# upstream system's JSON rather than this file's snake_case convention.
APPLICATIONSTARTSTOP = api.model(
    'ApplicationStartStop', {
        'termOfPermit': fields.String,
        'permitIssuedDate': fields.String,
        'portalApplicationPackageId': fields.String,
        'nowNumber': fields.String,
        'applicationId': fields.String,
        'permitExpiryDate': fields.String,
        'noticeOfWorkType': fields.String,
        'applicationType': fields.String,
        'processedDate': fields.String,
        'contacts': fields.List(fields.Nested(CLIENT)),
        'mineNumber': fields.String,
        'typeOfApplication': fields.String,
        'applicationDescription': fields.String,
        'attachments': fields.List(fields.Nested(DOCUMENT)),
        'status': fields.String,
        'approvalNumber': fields.String,
        'links': fields.List(fields.String),
        'stopWorkDate': fields.String,
        'startWorkDate': fields.String,
        'receivedDate': fields.String,
        'portalApplicationPackageNumber': fields.String,
        'typeOfPermit': fields.String,
        'proposedStartDate': fields.String,
        'otherInformation': fields.String,
        'permitStartDate': fields.String,
        'submittedDate': fields.String,
        'portalApplicationId': fields.String,
        'proposedEndDate': fields.String
    })
import time
import numpy as np
import requests
from jina.clients import py_client
from jina.clients.python import PyClient
from jina.clients.python.io import input_files, input_numpy
from jina.drivers.helper import array2pb
from jina.enums import ClientMode
from jina.flow import Flow
from jina.main.parser import set_gateway_parser
from jina.peapods.gateway import RESTGatewayPea
from jina.proto.jina_pb2 import Document
from tests import JinaTestCase
class MyTestCase(JinaTestCase):
def test_client(self):
f = Flow().add(yaml_path='_forward')
with f:
print(py_client(port_grpc=f.port_grpc).call_unary(b'a1234', mode=ClientMode.INDEX))
def tearDown(self) -> None:
super().tearDown()
time.sleep(3)
def test_check_input(self):
input_fn = iter([b'1234', b'45467'])
PyClient.check_input(input_fn)
input_fn = iter([Document(), Document()])
PyClient.check_input(input_fn)
bad_input_fn = iter([b'1234', '45467', [12, 2, 3]])
self.assertRaises(TypeError, PyClient.check_input, bad_input_fn)
bad_input_fn = iter([Document(), None])
self.assertRaises(TypeError, PyClient.check_input, bad_input_fn)
def test_gateway_ready(self):
p = set_gateway_parser().parse_args([])
with RESTGatewayPea(p):
a = requests.get(f'http://0.0.0.0:{p.port_grpc}/ready')
self.assertEqual(a.status_code, 200)
with RESTGatewayPea(p):
a = requests.post(f'http://0.0.0.0:{p.port_grpc}/api/ass')
self.assertEqual(a.status_code, 405)
def test_gateway_index(self):
f = Flow(rest_api=True).add(yaml_path='_forward')
with f:
a = requests.post(f'http://0.0.0.0:{f.port_grpc}/api/index',
json={'data': [
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC']})
j = a.json()
self.assertTrue('index' in j)
self.assertEqual(len(j['index']['docs']), 2)
self.assertEqual(j['index']['docs'][0]['uri'],
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC')
self.assertEqual(a.status_code, 200)
def test_gateway_index_with_args(self):
f = Flow(rest_api=True).add(yaml_path='_forward')
with f:
a = requests.post(f'http://0.0.0.0:{f.port_grpc}/api/index',
json={'data': [
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC'],
'first_doc_id': 5,
})
j = a.json()
self.assertTrue('index' in j)
self.assertEqual(len(j['index']['docs']), 2)
self.assertEqual(j['index']['docs'][0]['docId'], 5)
self.assertEqual(j['index']['docs'][1]['docId'], 6)
self.assertEqual(j['index']['docs'][0]['uri'],
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC')
self.assertEqual(a.status_code, 200)
    def test_io_files(self):
        # Exercise the glob-based file input generator with its main options
        # (recursion, batching, binary read mode, sampling), then index *.py
        # files and check the sniffed MIME type.
        PyClient.check_input(input_files('*.*'))
        PyClient.check_input(input_files('*.*', recursive=True))
        PyClient.check_input(input_files('*.*', size=2))
        PyClient.check_input(input_files('*.*', size=2, read_mode='rb'))
        PyClient.check_input(input_files('*.*', sampling_rate=.5))
        f = Flow().add(yaml_path='- !URI2Buffer {}')

        def validate_mime_type(req):
            # Python sources must be detected as text/x-python.
            for d in req.index.docs:
                self.assertEqual(d.mime_type, 'text/x-python')

        with f:
            f.index(input_files('*.py'), validate_mime_type)
    def test_io_np(self):
        # The numpy input adapter must accept ndarrays; a plain list of
        # strings is also valid client input. The print()s are debug output
        # showing the wrapped types.
        print(type(np.random.random([100, 4])))
        PyClient.check_input(input_numpy(np.random.random([100, 4, 2])))
        PyClient.check_input(['asda', 'dsadas asdasd'])
        print(type(array2pb(np.random.random([100, 4, 2]))))
    def test_unary_driver(self):
        # The unary crafter must wrap each doc in exactly one chunk whose
        # 'content' oneof matches the input modality (blob for arrays,
        # text for strings).
        f = Flow().add(yaml_path='yaml/unarycrafter.yml')

        def check_non_empty(req, field):
            for d in req.index.docs:
                self.assertEqual(len(d.chunks), 1)
                self.assertEqual(d.chunks[0].WhichOneof('content'), field)

        with f:
            f.index_ndarray(np.random.random([10, 4, 2]), output_fn=lambda x: check_non_empty(x, 'blob'))
        with f:
            f.index(np.random.random([10, 4, 2]), output_fn=lambda x: check_non_empty(x, 'blob'))
        with f:
            f.index(['asda', 'dsadas asdasd'], output_fn=lambda x: check_non_empty(x, 'text'))
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dialog.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_CDialog(object):
    """Auto-generated (pyuic5) UI for a small name/password dialog.

    Prefer editing 'dialog.ui' and regenerating rather than changing
    this class by hand -- see the generator warning above.
    """

    def setupUi(self, CDialog):
        # 3-row grid: name label+field, password label+field, and an
        # OK/Cancel button box spanning both columns.
        CDialog.setObjectName("CDialog")
        CDialog.resize(281, 106)
        self.gridLayout = QtWidgets.QGridLayout(CDialog)
        self.gridLayout.setObjectName("gridLayout")
        self.label_gif = QtWidgets.QLabel(CDialog)
        self.label_gif.setObjectName("label_gif")
        self.gridLayout.addWidget(self.label_gif, 0, 0, 1, 1)
        self.lineEdit = QtWidgets.QLineEdit(CDialog)
        self.lineEdit.setObjectName("lineEdit")
        self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 1)
        self.lePassword = QtWidgets.QLineEdit(CDialog)
        self.lePassword.setObjectName("lePassword")
        self.gridLayout.addWidget(self.lePassword, 1, 1, 1, 1)
        self.label_gif_2 = QtWidgets.QLabel(CDialog)
        self.label_gif_2.setObjectName("label_gif_2")
        self.gridLayout.addWidget(self.label_gif_2, 1, 0, 1, 1)
        self.buttonBox = QtWidgets.QDialogButtonBox(CDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 2)
        self.retranslateUi(CDialog)
        # Auto-connect slots named on_<objectName>_<signal>.
        QtCore.QMetaObject.connectSlotsByName(CDialog)

    def retranslateUi(self, CDialog):
        # Install translatable UI strings (labels are Chinese: "name" /
        # "password"); string contents must stay as generated.
        _translate = QtCore.QCoreApplication.translate
        CDialog.setWindowTitle(_translate("CDialog", "Dialog"))
        self.label_gif.setText(_translate("CDialog", "姓名"))
        self.label_gif_2.setText(_translate("CDialog", "密码"))
|
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Optional, Union
from six import with_metaclass
from ..request import ASGIRequest, Request
from ..serializers.marshmallow import DefaultSchemaMeta, Schema
from .base import Depends
class Marshmallow(with_metaclass(ABCMeta, Depends)):
    """Abstract dependency that pulls data off a request and validates it
    against an optional marshmallow schema.

    ``schema`` may be a ``Schema`` instance, a ``Schema`` subclass, a plain
    dict of fields (converted via ``Schema.from_dict``), or ``None`` to
    skip validation entirely.
    """

    def __init__(self, schema: Optional[Union[Schema, Dict[str, Any]]] = None) -> None:
        if isinstance(schema, dict):
            # A bare mapping of fields: build an ad-hoc schema class and
            # instantiate it.
            schema = Schema.from_dict(schema)()
        elif type(schema) is DefaultSchemaMeta:
            # A schema *class* was passed; instantiate it.
            schema = schema()
        elif schema is None or isinstance(schema, Schema):
            pass
        else:  # pragma: no cover
            raise TypeError(f"schema type must be of type {Schema!r} or dict")
        self.schema: Schema = schema

    @abstractmethod
    def get_data(self, request: Union[Request, ASGIRequest]) -> dict:
        """Extract the raw payload for this dependency from *request*."""

    def validate(self, request: Union[Request, ASGIRequest]):
        # Load through the schema only when one was configured.
        raw = self.get_data(request)
        if self.schema is None:
            return raw
        return self.schema.load(raw)

    def __call__(self, request: Union[Request, ASGIRequest]) -> dict:
        return self.validate(request)
class Query(Marshmallow):
    """Validate data taken from the URL query string."""

    def get_data(self, request: Union[Request, ASGIRequest]) -> dict:
        return request.params


class Form(Marshmallow):
    """Validate data taken from the submitted form fields."""

    def get_data(self, request: Union[Request, ASGIRequest]) -> dict:
        return request.forms


class File(Marshmallow):
    """Validate uploaded files attached to the request."""

    def get_data(self, request: Union[Request, ASGIRequest]) -> dict:
        return request.files


class Body(Marshmallow):
    """Validate the JSON request body."""

    def get_data(self, request: Union[Request, ASGIRequest]) -> dict:
        return request.json


class Header(Marshmallow):
    """Validate data taken from the request headers."""

    def get_data(self, request: Union[Request, ASGIRequest]) -> dict:
        return request.headers


class Cookie(Marshmallow):
    """Validate data taken from the request cookies."""

    def get_data(self, request: Union[Request, ASGIRequest]) -> dict:
        return request.cookies
|
# encoding: utf-8
"""
community.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.util import ordinal
from exabgp.bgp.message.update.attribute import Attribute
from struct import pack
# ======================================================= ExtendedCommunity (16)
# XXX: Should subclasses register with transitivity ?
class ExtendedCommunity (Attribute):
    """BGP Extended Communities attribute (RFC 4360, attribute code 16).

    The community is kept as the raw 8-byte wire value. Concrete subclasses
    register themselves by (type & 0x0F, subtype) so unpack() can return a
    specialised instance when the pair is known.
    """
    ID = Attribute.CODE.EXTENDED_COMMUNITY
    FLAG = Attribute.Flag.TRANSITIVE | Attribute.Flag.OPTIONAL

    COMMUNITY_TYPE = 0x00  # MUST be redefined by subclasses
    COMMUNITY_SUBTYPE = 0x00  # MUST be redefined by subclasses
    NON_TRANSITIVE = 0x40

    # Maps (type low nibble, subtype) -> subclass; filled by register().
    registered_extended = {}

    @classmethod
    def register (cls, klass):
        # Only the low nibble of the type is used as key -- matching the
        # masking done in unpack() below.
        cls.registered_extended[(klass.COMMUNITY_TYPE & 0x0F,klass.COMMUNITY_SUBTYPE)] = klass
        return klass

    # size of value for data (boolean: is extended)
    length_value = {False:7, True:6}
    name = {False: 'regular', True: 'extended'}

    __slots__ = ['community']

    def __init__ (self, community):
        # Two top bits are iana and transitive bits
        self.community = community
        # Set by unpack() when a registered subclass matched; drives
        # __repr__/json. NOTE(review): 'klass' is not listed in __slots__,
        # so this relies on a base class providing __dict__ -- confirm.
        self.klass = None

    def __eq__(self, other):
        return \
            self.ID == other.ID and \
            self.FLAG == other.FLAG and \
            self.community == other.community

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__ (self, other):
        # Ordering compares the raw wire bytes.
        return self.community < other.community

    def __le__ (self, other):
        return self.community <= other.community

    def __gt__ (self, other):
        return self.community > other.community

    def __ge__ (self, other):
        return self.community >= other.community

    def iana (self):
        # High bit of the first byte is the IANA authority bit.
        return not not (self.community[0] & 0x80)

    def transitive (self):
        # bit set means "not transitive"
        # RFC4360:
        #   T - Transitive bit
        #       Value 0: The community is transitive across ASes
        #       Value 1: The community is non-transitive across ASes
        return not (self.community[0] & 0x40)

    def pack (self, negotiated=None):
        # Already stored in wire format; nothing to encode.
        return self.community

    def _subtype (self, transitive=True):
        # if not transitive -> set the 'transitive' bit, as per RFC4360
        return pack(
            '!BB',
            self.COMMUNITY_TYPE if transitive else self.COMMUNITY_TYPE | self.NON_TRANSITIVE,
            self.COMMUNITY_SUBTYPE
        )

    def json (self):
        # Fold the community bytes big-endian into a single integer.
        h = 0x00
        for byte in self.community:
            h <<= 8
            h += ordinal(byte)
        s = self.klass.__repr__(self) if self.klass else ''
        return '{ "value": %s, "string": "%s" }' % (h,s)

    def __repr__ (self):
        # Delegate to the registered subclass when known, else hex-dump the
        # 8-byte value.
        if self.klass:
            return self.klass.__repr__(self)
        h = 0x00
        for byte in self.community:
            h <<= 8
            h += ordinal(byte)
        return "0x%016X" % h

    def __len__ (self):
        # Extended communities are always 8 bytes on the wire.
        return 8

    def __hash__ (self):
        return hash(self.community)

    @staticmethod
    def unpack (data, negotiated=None):
        # 30/02/12 Quagga communities for soo and rt are not transitive when 4360 says they must be, hence the & 0x0FFF
        community = (ordinal(data[0]) & 0x0F,ordinal(data[1]))
        if community in ExtendedCommunity.registered_extended:
            klass = ExtendedCommunity.registered_extended[community]
            instance = klass.unpack(data)
            instance.klass = klass
            return instance
        # Unknown (type, subtype): keep the raw bytes in the generic class.
        return ExtendedCommunity(data)
|
from django.conf.urls import url
from ..views import WorkingGroupListView
# Route the app root to the working-group list view.
urlpatterns = [
    url(r'^$', WorkingGroupListView.as_view(), name='working-groups'),
]
|
import PySimpleGUI as sg
import InstantRenameCLI
# Custom dark theme (Nvidia green accents) registered with PySimpleGUI.
theme = {
    "BACKGROUND": "#262B2F",
    "TEXT": "#FFFFFF",
    "INPUT": "#212326",
    "TEXT_INPUT": "#FFFFFF",
    "SCROLL": "#76B900",
    "BUTTON": (
        "#FFFFFF",
        "#76B900"
    ),
    "PROGRESS": (
        "#000000",
        "#000000"
    ),
    "BORDER": 1,
    "SLIDER_DEPTH": 0,
    "PROGRESS_DEPTH": 0
}
sg.theme_add_new('NvidiaTheme', theme)
sg.theme('NvidiaTheme')

# Main window layout: a tab group (General / Advanced / Help), action
# buttons, and a listbox used as the log/output area.
layout = [
    [sg.Text("NvidiaInstantRename - A simple tool to move and rename Nvidia Share (Shadowplay) recordings to a more sensible format.")],
    [
        sg.TabGroup([[
            sg.Tab('General', [
                [sg.Text("Source folder "), sg.In(key="source"), sg.FolderBrowse(enable_events=True)],
                [sg.Text("Destination folder"), sg.In(key="dest"), sg.FolderBrowse(enable_events=True)],
                [sg.Text("Filename format "), sg.In("{date}.{time}.{app}{dvr}.mp4", key="format", enable_events=True)],
                # Placeholder text is replaced by showExample() at startup.
                [sg.Text("placeholder", key="formatexample"),],
                [sg.Checkbox("Validate (prevent moving non-shadowplay files)", default = True, key="validate")],
            ]),
            sg.Tab('Advanced', [
                [sg.Text("DVR indicator ({dvr} in formatting)"), sg.In(".DVR", key="dvrval")],
                [sg.Text("Replace space character with (in app name)"), sg.In("_", key="spacechar", size=(1,0))],
                [sg.Text("DVR-only destination folder"), sg.In("", key="dvrpath")],
                [sg.Text("non-DVR destination folder"), sg.In("", key="nodvrpath")],
            ]),
            sg.Tab('Help', [
                [sg.Text('''Formatting options:
{app} - Application/Game name
{date} - Date, formatted with dashes
{time} - Time, formatted with dashes
{sy}/{shortyear} - Year, last two digits
{y}/{year} - Year
{m}/{month} - Month
{d}/{day} - Day
{h}/{hour} - Hour (24h)
{min}/{minute} - Minute
{s}/{second} - Second
{i}/{index} - Index number set by Nvidia Share after the date and time
{orig}/{original} - Original filename (without path)
{dvr} - Only present if recording is DVR, set text with "DVR indicator" in Advanced''')],
            ])
        ]], key="groups", enable_events=True
        )
    ],
    [sg.Button("Quit", font='Any 12 bold'), sg.Button("Preview", font='Any 12 bold'), sg.Button("Move/Rename", font='Any 12 bold')],
    [sg.Listbox(
        values=["Output will appear here..."], enable_events=True, size=(100, 20), expand_x=True, expand_y=True, key="logs"
    )]
]
def showExample(window, values):
    """Render a live preview of the current filename format string.

    Builds a dict of sample placeholder values (mirroring what
    InstantRenameCLI substitutes for a real recording) and writes the
    formatted example into the 'formatexample' label.

    Parameters:
        window: the PySimpleGUI window containing the 'formatexample' element.
        values: the current window values dict ('format', 'spacechar', 'dvrval').
    """
    info = {
        "app": "Video Game Name".replace(" ", values["spacechar"]),
        "date": "2021-11-30",
        "time": "17-31-43",
        "year": "2021",
        "shortyear": "21",
        "month": "11",
        "day": "30",
        "hour": "17",
        "minute": "31",
        "second": "43",
        "index": "123",
        "dvr": values["dvrval"],
        "original": "Video Game Name 2021.11.30 - 17.31.43.123.DVR.mp4",
    }
    try:
        window["formatexample"].update(f"Example: {InstantRenameCLI.formatName(values['format'],info)}")
    except Exception:
        # A malformed format string raises inside formatName. Fix: the
        # original bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
        window["formatexample"].update("Error generating example")
# Create the window
window = sg.Window("NvidiaInstantRename", layout, resizable=True, finalize=True) # use_custom_titlebar=True

# Best-effort: pre-fill the source folder from the ShadowPlay registry key
# (Windows only; utf-16 value ends with a NUL that is stripped with [:-1]).
try:
    import winreg
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\NVIDIA Corporation\\Global\\ShadowPlay\\NVSPCAPS")
    value = winreg.QueryValueEx(key, "DefaultPathW")
    window["source"].update(value[0].decode(encoding='utf-16',errors='strict')[:-1])
except Exception as e:
    print(f"Failed to automatically find source folder: {e}")

# Initial non-blocking read so the format example can be rendered at once.
event, values = window.read(timeout=0)
showExample(window,values)

# Create an event loop
while True:
    event, values = window.read()
    # End program if user closes window or
    # presses the OK button
    if event == "Quit" or event == sg.WIN_CLOSED:
        break
    if event == "Preview" or event == "Move/Rename":
        # Basic input validation before invoking the CLI backend.
        if len(values["dest"]) == 0:
            window["logs"].update(["Error: Destination not specified"])
            continue
        if len(values["format"]) == 0:
            window["logs"].update(["Error: Format not specified"])
            continue
        window["logs"].update(["Moving..."])
        try:
            # 'simulate' makes Preview a dry run; empty optional paths are
            # passed as None so the backend falls back to the main dest.
            logs = InstantRenameCLI.process(
                {
                    'inputpath': values["source"],
                    'targetpath': values["dest"],
                    'format': values["format"],
                    'dvrval': values["dvrval"],
                    'dvrpath': values["dvrpath"] if len(values["dvrpath"]) > 0 else None,
                    'nodvrpath': values["nodvrpath"] if len(values["nodvrpath"]) > 0 else None,
                    'spacechar': values["spacechar"],
                    'simulate': event == "Preview",
                    'validate': values["validate"]
                })
            window["logs"].update(logs)
        except Exception as e:
            window["logs"].update([f"Error: {e}"])
    if event == "format" or event == "groups":
        # Refresh the example whenever the format string or tab changes.
        showExample(window,values)

window.close()
import json

# Print the 'data' payload of a saved API response and how many items it has.
with open('api_response.json', encoding='utf-8') as file:
    # json.load parses the stream directly -- no need to read the whole
    # file into a string first (json.loads(file.read())).
    data = json.load(file)['data']

print(data)
print(len(data))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-27 11:45
from __future__ import unicode_literals
from django.db import migrations
import sitetools.models.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.9.7, 2016-06-27).

    Redefines two custom fields; the byte-string defaults/choices are as
    generated under Python 2 and must match the model state exactly.
    """

    dependencies = [
        ('sitetools', '0005_auto_20150317_1547'),
    ]

    operations = [
        migrations.AlterField(
            model_name='legaldocumentversion',
            name='lang',
            field=sitetools.models.fields.LanguageField(choices=[(b'es', b'Spanish'), (b'en', b'English')], help_text='Legal document language for this version', max_length=2, verbose_name='Language'),
        ),
        migrations.AlterField(
            model_name='sitelog',
            name='data',
            field=sitetools.models.fields.JSONField(blank=True, default=b'"\\"null\\""', help_text='Extra data for log message', null=True, verbose_name='Data'),
        ),
    ]
|
#! /usr/bin/python3
import click
from easysettings import EasySettings
from gop import ApiClient
from gop import FileLayer
from gop import GopController
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
settings = EasySettings("gop.conf")
VERSION_REGEX = "^([0-9\\.]+|latest)$"
PATH_REGEX = "^(http[s]{0,1}:\\/\\/[a-zA-Z0-9\\.]+(:[0-9]+){0,1}|([0-9]{1,3}\\.){3}[0-9](:[0-9]+){0,1})$"
@click.group()
def cli():
    """Root command group for the gop package-manager CLI."""
    pass


def create_api_client(fileLayer):
    # The repository URL is the first repository entry in the project
    # manifest; the auth token comes from the local settings file.
    return ApiClient(settings.get("token"), fileLayer.parse_yaml("./manifest.yaml")["project"]["repository"][0]["path"])


def controller():
    # Build a fresh controller (and API client) per command invocation.
    fileLayer = FileLayer()
    return GopController(create_api_client(fileLayer), fileLayer)
@cli.command('init')
@click.option("--name", required=False)
@click.option('--author', required=False)
@click.option('--version', required=False)
def init_project(name, author, version):
    """Initialise a new project with optional name/author/version."""
    controller().init_project(name, author, version)


@cli.command('ping')
def ping():
    """Check connectivity to the configured repository."""
    controller().ping()


@cli.command('upload-key')
@click.option('--key-file', type=click.Path(exists=True, readable=True, path_type=str), required=True)
def upload_key(key_file):
    """Upload the given key file to the repository."""
    controller().upload_key(key_file)


@cli.command('add')
@click.option('--dependency', required=True)
@click.option('--version', required=True)
def add_pkg(dependency, version):
    """Add a dependency at the given version to the manifest."""
    controller().add_pkg(dependency, version)


@cli.command('search')
@click.option('--author', required=True)
def search(author):
    """Search packages by author."""
    controller().search(author)


@cli.command('update')
@click.option('--dry-run', required=False, count=True)
def update(dry_run):
    """Update dependencies; --dry-run only reports what would change."""
    controller().update(dry_run)


@cli.command('login')
@click.option('--token', required=False)
@click.option('--repo', required=False, count=True)
def login(token, repo):
    """Store credentials for the repository."""
    controller().login(token, repo)


@cli.command('logout')
def logout():
    """Discard stored credentials."""
    controller().logout()


@cli.command('install')
def install():
    """Install the dependencies listed in the manifest."""
    controller().install()
@cli.command('trust-key')
@click.option('--key-file', type=click.Path(exists=True, readable=True, path_type=str), required=True)
@click.option('--author', required=True)
def trust(key_file, author):
    """Mark the author's key (read from --key-file) as trusted.

    Fix: the function signature requires an ``author`` argument, but no
    ``--author`` option was declared, so click could never supply it and
    every invocation of ``trust-key`` failed with a missing-argument error.
    """
    controller().trust(key_file, author)
@cli.command('package')
@click.option('--key-file', type=click.Path(exists=True, readable=True, path_type=str), required=True)
def package(key_file):
    """Package the project using the given key file."""
    controller().package(key_file)


@cli.command('generate-key-pair')
def generate_key_pair():
    """Generate a new signing key pair."""
    controller().generate_key_pair()


if __name__ == '__main__':
    cli()


# NOTE(review): dead code -- defined after the __main__ guard and never
# called; Python has no special '__main__' function hook.
def __main__():
    cli()
|
from typing import Optional
class Block(object):
    """Base class for interpreter blocks.

    Subclasses override these hooks. The defaults decline to handle any
    context and produce no output, so an unmodified Block is inert.
    """

    def __init__(self):
        pass

    def will_accept(self, ctx : 'Interpreter.Context') -> Optional[bool]:
        # The base block never accepts a context.
        return False

    def pre_process(self, ctx : 'Interpreter.Context'):
        # Hook invoked before process(); no-op by default.
        return None

    def process(self, ctx : 'Interpreter.Context') -> Optional[str]:
        # Produce this block's output text; None means "no output".
        return None

    def post_process(self, ctx : 'Interpreter.Context'):
        # Hook invoked after process(); no-op by default.
        return None
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The following only applies to changes made to this file as part of YugaByte development.
#
# Portions Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
# This script generates a header file which contains definitions
# for the current Kudu build (eg timestamp, git hash, etc)
import os
import subprocess
import sys
class Colors(object):
    """ ANSI color codes. """
    # Helper evaluated at class-definition time: returns the escape code
    # only when stdout is a TTY, so piped/redirected output stays plain.
    def __on_tty(x):
        if not os.isatty(sys.stdout.fileno()):
            return ""
        return x
    RED = __on_tty("\x1b[31m")
    GREEN = __on_tty("\x1b[32m")
    YELLOW = __on_tty("\x1b[33m")
    RESET = __on_tty("\x1b[m")
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    Backported from Python 2.7 as it's implemented as pure python on stdlib.
    Raises subprocess.CalledProcessError (with .output set) on a non-zero
    exit status.

    >>> check_output(['/usr/bin/python', '--version'])
    Python 2.6.2
    """
    # stdout is always captured; all other Popen arguments pass through.
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        # Reconstruct the command for the error message.
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        error = subprocess.CalledProcessError(retcode, cmd)
        error.output = output
        raise error
    return output
def confirm_prompt(prompt):
    """
    Issue the given prompt, and ask the user to confirm yes/no. Returns true
    if the user confirms. Re-prompts on any other answer; an empty answer
    counts as "yes". Non-interactive runs answer "no". (Python 2 only:
    uses print statements and raw_input.)
    """
    while True:
        print prompt, "[Y/n]:",
        if not os.isatty(sys.stdout.fileno()):
            print "Not running interactively. Assuming 'N'."
            return False
            pass  # NOTE(review): unreachable after the return above
        r = raw_input().strip().lower()
        if r in ['y', 'yes', '']:
            return True
        elif r in ['n', 'no']:
            return False
def get_my_email():
    """ Return the email address in the user's git config. """
    # Shells out to git; a non-zero exit from git makes check_output()
    # above raise CalledProcessError.
    return check_output(['git', 'config', '--get', 'user.email']).strip()
|
import setuptools

# The long description shown on PyPI is sourced from DESCRIPTION.md.
with open("DESCRIPTION.md", "r") as fh:
    long_description = fh.read()

# Taken largely from https://packaging.python.org/tutorials/packaging-projects/
setuptools.setup(
    name="transfer-message",
    version="0.0.1",
    author="Michael Edwards",
    author_email="medwards@walledcity.ca",
    description="A command line tool for transferring messages between different messaging systems",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/medwards/cli-demo",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    # see https://python-packaging.readthedocs.io/en/latest/testing.html
    test_suite='nose.collector',
    tests_require=['nose'],
    # see https://chriswarrick.com/blog/2014/09/15/python-apps-the-right-way-entry_points-and-scripts/
    # Installs a 'transfermessage' console script pointing at __main__:main.
    entry_points = {
        'console_scripts': ['transfermessage=transfer_message.__main__:main'],
    }
)
|
from django.contrib import admin
from django.urls import path
from .views.home import Index , store
from .views.signup import Signup
from .views.login import Login , logout
from .views.cart import Cart
from .views.checkout import CheckOut
from .views.orders import OrderView
from .middlewares.auth import auth_middleware
from .views.mail import sendemail
# Store-front URL routes. Cart and order views are wrapped in
# auth_middleware so only authenticated users can reach them.
urlpatterns = [
    path('', Index.as_view(), name='homepage'),
    path('store', store , name='store'),
    path('signup', Signup.as_view(), name='signup'),
    path('login', Login.as_view(), name='login'),
    path('logout', logout , name='logout'),
    path('cart', auth_middleware(Cart.as_view()) , name='cart'),
    path('check-out', CheckOut.as_view() , name='checkout'),
    path('orders', auth_middleware(OrderView.as_view()), name='orders'),
    path('sendemail', sendemail,name='sendemail'),
]
|
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Return the binary sum of two binary strings ``a`` and ``b``.

        Both operands are parsed with base-2 ``int``, added, and the result
        is rendered back to binary without a prefix.
        """
        # bin() already returns a str, so the original str(bin(c))[2:] had a
        # redundant str(); format(..., 'b') produces the same digits
        # (including '0' for zero) without prefix slicing.
        return format(int(a, 2) + int(b, 2), 'b')
|
import sys
import os
import pymongo
import time
import random
from datetime import datetime
# Random timestamps are drawn uniformly from calendar year 2012.
min_date = datetime(2012, 1, 1)
max_date = datetime(2013, 1, 1)
delta = (max_date - min_date).total_seconds()

# argv[1] = number of documents to insert (required);
# argv[2] = job id used only for log labelling (optional).
job_id = '1'
if len(sys.argv) < 2:
    sys.exit("You must supply the item_number argument")
elif len(sys.argv) > 2:
    job_id = sys.argv[2]

documents_number = int(sys.argv[1])
batch_number = 5 * 1000;
job_name = 'Job#' + job_id
start = datetime.now();

# obtain a mongo connection (pymongo.Connection is the legacy pre-3.x API)
connection = pymongo.Connection("mongodb://localhost", safe=True)

# obtain a handle to the random database
db = connection.random
collection = db.randomData

# Pre-sized buffer reused for each batch of inserts.
batch_documents = [i for i in range(batch_number)];
for index in range(documents_number):
    try:
        # Uniform random timestamp in [min_date, max_date).
        date = datetime.fromtimestamp(time.mktime(min_date.timetuple()) + int(round(random.random() * delta)))
        value = random.random()
        document = {
            'created_on' : date,
            'value' : value,
        }
        batch_documents[index % batch_number] = document
        if (index + 1) % batch_number == 0:
            collection.insert(batch_documents)
        # NOTE(review): 'index' is rebound by the for loop on the next
        # iteration, so this increment only shifts the progress check below.
        # A final partial batch (documents_number not a multiple of
        # batch_number) is never inserted -- confirm whether intended.
        index += 1;
        if index % 100000 == 0:
            print job_name, ' inserted ', index, ' documents.'
    except:
        print 'Unexpected error:', sys.exc_info()[0], ', for index ', index
        raise
print job_name, ' inserted ', documents_number, ' in ', (datetime.now() - start).total_seconds(), 's'
|
import sys
import DefaultTable
import numpy
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
import warnings
class table__h_m_t_x(DefaultTable.DefaultTable):
    """'hmtx' horizontal metrics table: one (advance width, left side
    bearing) pair per glyph, keyed by glyph name in self.metrics.

    NOTE: Python 2 only -- uses the '<>' operator, tuple parameters and
    numpy.fromstring; do not run under Python 3 without porting.
    The class attributes below are overridden by the 'vmtx' subclass to
    reuse this code for vertical metrics.
    """
    headerTag = 'hhea'
    advanceName = 'width'
    sideBearingName = 'lsb'
    numberOfMetricsName = 'numberOfHMetrics'

    def decompile(self, data, ttFont):
        # The first numberOfHMetrics entries are full (advance, sideBearing)
        # int16 pairs; remaining glyphs reuse the last advance and store
        # only a side bearing.
        numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName))
        metrics = numpy.fromstring(data[:4 * numberOfMetrics],
                numpy.int16)
        # Table data is big-endian; swap on little-endian hosts.
        if sys.byteorder <> "big":
            metrics = metrics.byteswap()
        metrics.shape = (numberOfMetrics, 2)
        data = data[4 * numberOfMetrics:]
        numberOfSideBearings = ttFont['maxp'].numGlyphs - numberOfMetrics
        numberOfSideBearings = int(numberOfSideBearings)
        if numberOfSideBearings:
            assert numberOfSideBearings > 0, "bad hmtx/vmtx table"
            # Expand the trailing side-bearing-only entries into full pairs
            # by repeating the last explicit advance.
            lastAdvance = metrics[-1][0]
            advances = numpy.array([lastAdvance] * numberOfSideBearings,
                    numpy.int16)
            sideBearings = numpy.fromstring(data[:2 * numberOfSideBearings],
                    numpy.int16)
            if sys.byteorder <> "big":
                sideBearings = sideBearings.byteswap()
            data = data[2 * numberOfSideBearings:]
            if len(advances) and len(sideBearings):
                additionalMetrics = numpy.array([advances, sideBearings], numpy.int16)
                metrics = numpy.concatenate((metrics, numpy.transpose(additionalMetrics)))
            else:
                warnings.warn('Unable to include additional metrics')
        if data:
            # Trailing bytes are tolerated with a warning, not an error.
            sys.stderr.write("too much data for hmtx/vmtx table\n")
        metrics = metrics.tolist()
        self.metrics = {}
        for i in range(len(metrics)):
            glyphName = ttFont.getGlyphName(i)
            self.metrics[glyphName] = metrics[i]

    def compile(self, ttFont):
        metrics = []
        for glyphName in ttFont.getGlyphOrder():
            metrics.append(self.metrics[glyphName])
        # Trim the run of trailing entries sharing the same advance: they
        # are stored as side bearings only (the inverse of decompile()).
        lastAdvance = metrics[-1][0]
        lastIndex = len(metrics)
        while metrics[lastIndex-2][0] == lastAdvance:
            lastIndex = lastIndex - 1
            if lastIndex <= 1:
                # all advances are equal
                lastIndex = 1
                break
        additionalMetrics = metrics[lastIndex:]
        additionalMetrics = map(lambda (advance, sb): sb, additionalMetrics)
        metrics = metrics[:lastIndex]
        setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics))

        metrics = numpy.array(metrics, numpy.int16)
        if sys.byteorder <> "big":
            metrics = metrics.byteswap()
        data = metrics.tostring()

        additionalMetrics = numpy.array(additionalMetrics, numpy.int16)
        if sys.byteorder <> "big":
            additionalMetrics = additionalMetrics.byteswap()
        data = data + additionalMetrics.tostring()
        return data

    def toXML(self, writer, ttFont):
        # Emit one <mtx> element per glyph, sorted by name for stable output.
        names = self.metrics.keys()
        names.sort()
        for glyphName in names:
            advance, sb = self.metrics[glyphName]
            writer.simpletag("mtx", [
                    ("name", glyphName),
                    (self.advanceName, advance),
                    (self.sideBearingName, sb),
                    ])
            writer.newline()

    def fromXML(self, (name, attrs, content), ttFont):
        if not hasattr(self, "metrics"):
            self.metrics = {}
        if name == "mtx":
            self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]),
                    safeEval(attrs[self.sideBearingName])]

    def __getitem__(self, glyphName):
        return self.metrics[glyphName]

    def __setitem__(self, glyphName, (advance, sb)):
        self.metrics[glyphName] = advance, sb
|
from PyQt5.QtWidgets import *
import vispy.app
import sys
import os
import numpy as np
from vispy import app, geometry
import vispy.scene
from vispy.color import Color
from vispy.scene.visuals import Polygon, Ellipse, Rectangle, RegularPolygon
from vispy import app, scene
from vispy.app import use_app
from vispy.visuals.shaders import Function
from vispy.visuals.collections import PointCollection
use_app('PyQt5')
class MyCanvas(vispy.scene.SceneCanvas):
    """Interactive scene canvas hosting a single view with colored 3D axes.

    Parameters
    ----------
    size : tuple of int, optional
        Initial canvas size in pixels as (width, height).
    watch_dir : str, optional
        Currently unused; kept for interface compatibility.
    """

    def __init__(self, size: tuple = (800, 500), watch_dir: str = "."):
        # Fix: the original annotated `size` with the literal tuple
        # (800, 500) -- which is not a type -- and gave the parameter no
        # default, forcing every caller to pass it explicitly.
        vispy.scene.SceneCanvas.__init__(self, keys='interactive', size=size, bgcolor=Color('white'))
        self.unfreeze()
        self.view = self.central_widget.add_view()
        self.freeze()

    def drawSome(self):
        """Attach a turntable camera and draw r/g/b axes for orientation."""
        self.unfreeze()
        self.view.camera = 'turntable'  # or try 'arcball'
        # add a colored 3D axis for orientation
        ax = scene.Axis(pos=[[0, 0], [1, 0]], tick_direction=(0, -1), axis_color='r', tick_color='r', text_color='r',
                        font_size=16, parent=self.view.scene)
        yax = scene.Axis(pos=[[0, 0], [0, 1]], tick_direction=(-1, 0), axis_color='g', tick_color='g', text_color='g',
                         font_size=16, parent=self.view.scene)
        zax = scene.Axis(pos=[[0, 0], [-1, 0]], tick_direction=(0, -1), axis_color='b', tick_color='b', text_color='b',
                         font_size=16, parent=self.view.scene)
        zax.transform = scene.transforms.MatrixTransform()  # its acutally an inverted xaxis
        zax.transform.rotate(90, (0, 1, 0))   # rotate cw around yaxis
        zax.transform.rotate(-45, (0, 0, 1))  # tick direction towards (-1,-1)
        self.freeze()
if __name__ == "__main__":
    # Embed the vispy canvas (via its native Qt widget) in a QMainWindow
    # together with a placeholder button, then enter the vispy event loop.
    app = QApplication(sys.argv)
    canvas = MyCanvas(size=(800, 500))
    canvas.drawSome()
    w = QMainWindow()
    widget = QWidget()
    w.setCentralWidget(widget)
    widget.setLayout(QVBoxLayout())
    widget.layout().addWidget(canvas.native)
    widget.layout().addWidget(QPushButton())
    w.show()
    vispy.app.run()
|
# -*- coding: utf-8 -*-
"""
Conversion between ``crystals`` data structures and other modules.
These functions are not expected to be used on their own; see the associated
`Crystal` methods instead, like `Crystal.to_cif`.
"""
from abc import abstractmethod
from contextlib import AbstractContextManager, redirect_stdout
from io import StringIO
import numpy as np
from CifFile import CifBlock, CifFile
from . import __version__
try:
import ase
except ImportError:
WITH_ASE = False
else:
WITH_ASE = True
# NOTE: this must be an f-string so the package version is interpolated;
# the original plain string wrote the literal text "{__version__}" into
# every generated CIF file despite importing __version__ above.
CIF_HEADER = f"""
#\#CIF_2.0
#
# File generated by `crystals` {__version__}, documented at https://crystals.rtfd.io
# Reference: L. P. René de Cotret et al, An open-source software ecosystem for the interactive exploration
# of ultrafast electron scattering data, Advanced Structural and Chemical Imaging 4:11 (2018) DOI: 10.1186/s40679-018-0060-y.
#
# For more information on this type of file, please visit https://www.iucr.org/resources/cif
"""
# TODO: test against known XYZ file
def write_xyz(crystal, fname, comment=None):
    """
    Generate an atomic coordinates .xyz file from a crystal structure.

    Parameters
    ----------
    crystal : crystals.Crystal
        Crystal to be converted.
    fname : path-like
        The XYZ file will be written to this file. If the file already exists,
        it will be overwritten.
    comment : str or None, optional
        Comment to include at the second line of ``fname``.
    """
    # Format is specified here:
    # http://openbabel.org/wiki/XYZ_%28format%29
    #
    # Lines 1-2 are the atom count and an optional comment; every further
    # line is one atom. Atoms are emitted sorted by element so the file is
    # easier for humans to scan.
    lines = [str(len(crystal)), comment or ""]
    for atom in crystal.itersorted():
        x, y, z = atom.coords_cartesian
        lines.append(f" {atom.element:<2} {x:10.5f} {y:10.5f} {z:10.5f}")
    with open(fname, "wt", encoding="ascii") as file:
        file.write("\n".join(lines) + "\n")
# TODO: write the asymmetric cell + symmetry operatrs
def write_cif(crystal, fname):
    """
    Generate an atomic coordinates .cif file from a crystal structure.

    Parameters
    ----------
    crystal : crystals.Crystal
        Crystal to be converted.
    fname : path-like
        The CIF file will be written to this file. If the file already exists,
        it will be overwritten.
    """
    cf = CifFile(strict=False)

    # Lattice parameters and symmetry metadata for the CIF block.
    a, b, c, alpha, beta, gamma = crystal.lattice_parameters
    lattice_items = {
        "_cell_length_a": a,
        "_cell_length_b": b,
        "_cell_length_c": c,
        "_cell_angle_alpha": alpha,
        "_cell_angle_beta": beta,
        "_cell_angle_gamma": gamma,
    }

    sym = crystal.symmetry()
    symmetry_items = {
        "_symmetry_Int_Tables_number": sym["international_number"],
        "_symmetry_space_group_name_Hall": sym["hall_symbol"],
    }

    block = CifBlock()
    for key, val in lattice_items.items():
        block[key] = val
    for key, val in symmetry_items.items():
        block[key] = val

    # Note that we are using all atoms in the unit-cell,
    # and not the asymmetric unit cell + symmetry operators
    # This is valid CIF! And it is much simpler to implement
    # TODO: how to determine asymmetric cell + symmetry operations?
    atoms = list(crystal.primitive().unitcell)
    symbols = [atm.symbol for atm in atoms]
    xf = [atm.coords_fractional[0] for atm in atoms]
    yf = [atm.coords_fractional[1] for atm in atoms]
    zf = [atm.coords_fractional[2] for atm in atoms]
    # Declare the atom-site loop before assigning its columns.
    block.CreateLoop(
        datanames=[
            "_atom_site_type_symbol",
            "_atom_site_fract_x",
            "_atom_site_fract_y",
            "_atom_site_fract_z",
        ],
        length_check=False,
    )
    block["_atom_site_type_symbol"] = symbols
    block["_atom_site_fract_x"] = xf
    block["_atom_site_fract_y"] = yf
    block["_atom_site_fract_z"] = zf

    # Name of the block cannot be empty!
    block_name = crystal.chemical_formula.replace(" ", "_")
    cf[block_name] = block

    # Converting to string writes to stdout for some reason
    with redirect_stdout(StringIO()):
        lines = str(cf).splitlines()

    with open(fname, "w", encoding="utf-8") as f:
        f.write(CIF_HEADER)
        # NOTE(review): assumes PyCifRW emits a 13-line preamble here --
        # confirm against the installed CifFile version.
        f.write("\n".join(lines[13::]))  # Skip the fixed header
def ase_atoms(crystal, **kwargs):
    """
    Convert a ``crystals.Crystal`` object into an ``ase.Atoms`` object.
    Extra keyword arguments are forwarded to the ``ase.Atoms`` constructor.
    Parameters
    ----------
    crystal : crystals.Crystal
        Crystal to be converted.
    Returns
    -------
    atoms : ase.Atoms
        Group of atoms ready for ASE's routines.
    Raises
    ------
    ImportError : If ASE is not installed
    """
    if not WITH_ASE:
        raise ImportError("ASE is not installed/importable.")
    # Build one ase.Atom per atom in the crystal, then wrap them together
    # with the lattice vectors as the simulation cell.
    unit_cell_atoms = [
        ase.Atom(
            symbol=atom.element,
            position=atom.coords_cartesian,
            magmom=atom.magmom,
            mass=atom.mass,
        )
        for atom in crystal
    ]
    cell = np.array(crystal.lattice_vectors)
    return ase.Atoms(symbols=unit_cell_atoms, cell=cell, **kwargs)
|
#####
# From https://github.com/ttu/ruuvitag-sensor/blob/master/ruuvitag_sensor/data_formats.py
#####
class DataFormats(object):
    """Detect the RuuviTag broadcast format of a hex payload and extract it."""

    @staticmethod
    def convertData(data):
        """Return ``(payload, format_number)`` for *data*.

        Formats are tried in order 2/4, 3, 5; returns ``(None, None)`` when
        no supported format is recognised.
        """
        result = DataFormats._getDataFormat24(data)
        if result is not None:
            return (result, 2)
        result = DataFormats._getDataFormat3(data)
        if result is not None:
            return (result, 3)
        result = DataFormats._getDataFormat5(data)
        if result is not None:
            return (result, 5)
        return (None, None)

    @staticmethod
    def _getDataFormat24(data):
        """Decode ASCII hex pairs and return the part after 'ruu.vi/#' (formats 2/4)."""
        try:
            base16_split = [data[i:i + 2] for i in range(0, len(data), 2)]
            # Only 7-bit values decode to printable ASCII; drop the rest.
            selected_hexs = filter(lambda x: int(x, 16) < 128, base16_split)
            characters = [chr(int(c, 16)) for c in selected_hexs]
            decoded = ''.join(characters)
            # take only part after ruu.vi/#
            index = decoded.find('ruu.vi/#')
            if index > -1:
                return decoded[(index + 8):]
            return None
        except ValueError:
            # Malformed hex -> this is not a format 2/4 payload.
            return None

    @staticmethod
    def _getDataFormat3(data):
        # Search of FF990403 (Manufacturer Specific Data (FF) /
        # Ruuvi Innovations ltd (9904) / Format 3 (03))
        try:
            if '990403' not in data:
                return None
            payload_start = data.index('990403') + 4
            return data[payload_start:]
        except TypeError:
            # Non-string input -> not this format.
            return None

    @staticmethod
    def _getDataFormat5(data):
        # Search of FF990405 (Manufacturer Specific Data (FF) /
        # Ruuvi Innovations ltd (9904) / Format 5 (05))
        try:
            if '990405' not in data:
                return None
            payload_start = data.index('990405') + 4
            return data[payload_start:]
        except TypeError:
            return None
|
class PypactException(Exception):
    """Base class for all pypact errors."""
    pass
class PypactFrozenException(PypactException):
    """Raised on attempts to mutate a frozen object."""
    pass
class PypactInvalidOptionException(PypactException):
    """Raised for an option value that is not valid."""
    pass
class PypactIncompatibleOptionException(PypactException):
    """Raised when two options cannot be combined."""
    pass
class PypactOutOfRangeException(PypactException):
    """Raised for a value outside its allowed range."""
    pass
class PypactSerializeException(PypactException):
    """Raised when serialization fails."""
    pass
class PypactDeserializeException(PypactException):
    """Raised when deserialization fails."""
    pass
class PypactFispactExecutableNotFoundException(PypactException):
    """Raised when the FISPACT executable cannot be located."""
    pass
class PypactTypeException(PypactException):
    """Raised for a value of an unexpected type."""
    pass
class PypactUnphysicalValueException(PypactException):
    """Raised for a value that is physically impossible."""
    pass
class PypactNotPrintLib4FileException(PypactException):
    """Raised when a file is not a printlib4-format file."""
    pass
class PypactNotPrintLib5FileException(PypactException):
    """Raised when a file is not a printlib5-format file."""
    pass
import bpy
class TextureGroupProps(bpy.types.PropertyGroup):
    """Property group describing one texture group: a name and a display color."""
    # BUG FIX: bpy.props.StringProperty must be *called* to produce a property
    # definition; assigning the bare function registered no 'name' property.
    name: bpy.props.StringProperty()
    color: bpy.props.FloatVectorProperty(
        name="Color",
        subtype="COLOR",
        min=0.0,
        max=1.0,
    )
class VIEW3D_UL_TextureGroup(bpy.types.UIList):
    """UIList row renderer for texture groups: icon, editable name, color swatch."""
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        layout.label(text="", icon="GROUP")
        layout.prop(item, "name", text="")
        # Narrow column so the color swatch does not dominate the row.
        col = layout.column()
        col.ui_units_x = 1.0
        col.prop(item, "color", text="")
class TextureGroup_OT_Add(bpy.types.Operator):
    """Operator that appends a new texture group and makes it active."""
    bl_idname = "taremin.add_texture_group"
    bl_label = "Add Entry"
    # Placeholder tooltip ('hoge') replaced with a meaningful description.
    bl_description = 'Add a new texture group'
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        settings = context.scene.taremin_tag
        settings.texture_groups.add()
        # Select the freshly appended entry and give it a default name.
        settings.active_texture_group_index = len(settings.texture_groups) - 1
        settings.texture_groups[settings.active_texture_group_index].name = "TextureGroup"
        return {'FINISHED'}
class TextureGroup_OT_Remove(bpy.types.Operator):
    """Operator that removes the active texture group and its texture links."""
    bl_idname = "taremin.remove_texture_group"
    bl_label = "Remove Entry"
    bl_options = {'REGISTER', 'UNDO'}
    @classmethod
    def poll(cls, context):
        # Enabled only while some entry is selected.
        return context.scene.taremin_tag.active_texture_group_index >= 0
    def execute(self, context):
        settings = context.scene.taremin_tag
        # When removing a texture group, also remove the texture links that
        # reference it (links store the group's index in ref_type).
        remove_index = settings.active_texture_group_index
        remove_list = []
        for i, link in enumerate(settings.texture_links):
            link_type = int(link.ref_type)
            if link_type == remove_index:
                remove_list.append(i)
            # Shift references to groups that sit after the removed one.
            if link_type > remove_index:
                link.ref_type = str(link_type - 1)
        # Remove from the back so earlier indices stay valid.
        for i in reversed(remove_list):
            settings.texture_links.remove(i)
        settings.texture_groups.remove(settings.active_texture_group_index)
        # Clamp the selection if the last entry was removed.
        max_index = len(settings.texture_groups) - 1
        if settings.active_texture_group_index > max_index:
            settings.active_texture_group_index = max_index
        return {'FINISHED'}
class TextureGroup_OT_Up(bpy.types.Operator):
    """Operator that moves the active texture group one slot up."""
    bl_idname = "taremin.up_texture_group"
    bl_label = "Up Entry"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        # Only entries that are not already first can move up.
        return context.scene.taremin_tag.active_texture_group_index > 0

    def execute(self, context):
        props = context.scene.taremin_tag
        src = props.active_texture_group_index
        dst = src - 1
        props.texture_groups.move(src, dst)
        props.active_texture_group_index = dst
        return {'FINISHED'}
class TextureGroup_OT_Down(bpy.types.Operator):
    """Operator that moves the active texture group one slot down."""
    bl_idname = "taremin.down_texture_group"
    bl_label = "Down Entry"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        props = context.scene.taremin_tag
        last = len(props.texture_groups) - 1
        # Only entries that are not already last can move down.
        return props.active_texture_group_index < last

    def execute(self, context):
        props = context.scene.taremin_tag
        src = props.active_texture_group_index
        dst = src + 1
        props.texture_groups.move(src, dst)
        props.active_texture_group_index = dst
        return {'FINISHED'}
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.request import Request
from rest_framework.test import APIClient, APIRequestFactory
from academics import models, serializers
# List endpoint under test.
SPECIALIZATION_URL = reverse('academics:specialization-list')
# Build a throwaway request so hyperlinked serializers can render absolute URLs.
factory = APIRequestFactory()
request = factory.get('/')
# Context shared by every serializer instantiated in these tests.
serializer_context = {'request': Request(request)}
def specialization_detail_url(specialization_id):
    """Return the detail URL for the specialization with the given id."""
    return reverse('academics:specialization-detail', args=[specialization_id])
def sample_faculty(**kwargs):
    """Create and return a sample faculty; keyword args override the defaults."""
    params = {'name': 'Faculty 1'}
    params.update(kwargs)
    return models.Faculty.objects.create(**params)
def sample_department(faculty, **kwargs):
    """Create and return a sample department in *faculty*; kwargs override defaults."""
    params = {'name': 'Department 1'}
    params.update(kwargs)
    return models.Department.objects.create(faculty=faculty, **params)
def sample_level(**kwargs):
    """Create and return a sample level (default code 100); kwargs override defaults."""
    defaults = {'code': 100}
    # Consistency fix: pass the mapping positionally like the sibling helpers
    # do (the previous `defaults.update(**kwargs)` worked but diverged in
    # style and would fail for non-identifier keys).
    defaults.update(kwargs)
    return models.Level.objects.create(**defaults)
def sample_specialization(department, max_level, **kwargs):
    """Create and return a sample specialization; kwargs override the defaults."""
    params = {'name': 'Specialization 1', 'max_level': max_level}
    params.update(kwargs)
    return models.Specialization.objects.create(department=department, **params)
def test_all_model_attributes(insance, payload, model, serializer):
    """Assert each payload value matches the model (or its serialized form).

    ``insance`` is the calling TestCase instance (the parameter name is a
    typo for 'instance'; kept as-is so keyword callers are not broken --
    TODO rename in a coordinated change).
    """
    ignored_keys = ['image']
    relevant_keys = sorted(set(payload.keys()).difference(ignored_keys))
    for key in relevant_keys:
        try:
            # Direct attribute comparison works for plain value fields.
            insance.assertEqual(payload[key], getattr(model, key))
        except Exception:
            # Fallback comparison against the serialized representation --
            # presumably for relational/hyperlinked fields; verify.
            insance.assertEqual(payload[key], serializer.data[key])
class PublicSpecializationApiTest(TestCase):
    """test public (unauthenticated) access to the specialization api"""
    def setUp(self):
        self.client = APIClient()
    def test_authentication_required(self):
        """test that authentication is required"""
        res = self.client.get(SPECIALIZATION_URL)
        # Anonymous requests must be rejected outright.
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateSpecializationApiTest(TestCase):
    """test authenticated access to the specialization api"""
    def setUp(self):
        self.client = APIClient()
        # Authenticate as a superuser so endpoint permissions never block the
        # tests; the fixtures below are shared by every test method.
        self.user = get_user_model().objects.create_superuser(
            email='test@email.com',
            password='testpass'
        )
        self.client.force_authenticate(self.user)
        self.faculty = sample_faculty()
        self.department = sample_department(faculty=self.faculty)
        self.level = sample_level()
    def test_retrieve_specialization(self):
        """test retrieving a list of specializations"""
        sample_specialization(department=self.department, max_level=self.level)
        specialization = models.Specialization.objects.all()
        serializer = serializers.SpecializationSerializer(
            specialization,
            many=True,
            context=serializer_context
        )
        res = self.client.get(SPECIALIZATION_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # Response is paginated: the list lives under 'results'.
        self.assertEqual(res.data['results'], serializer.data)
    def test_retrieve_specialization_detail(self):
        """test retrieving a specialization's detail"""
        specialization = sample_specialization(department=self.department, max_level=self.level)
        serializer = serializers.SpecializationSerializer(specialization, context=serializer_context)
        url = specialization_detail_url(specialization_id=specialization.id)
        res = self.client.get(url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_create_specialization(self):
        """test creating a specialization"""
        department = sample_department(faculty=self.faculty, name='Department 2')
        # Related objects are referenced by hyperlink, so serialize them first.
        department_serializer = serializers.DepartmentSerializer(department, context=serializer_context)
        level_serializer = serializers.LevelSerializer(self.level, context=serializer_context)
        payload = {
            'department': department_serializer.data['url'],
            'name': 'Specialization 2',
            'max_level': level_serializer.data['url'],
            'description': 'some description text',
        }
        res = self.client.post(SPECIALIZATION_URL, payload)
        # Re-read from the DB to verify the persisted values, not the echo.
        specialization = models.Specialization.objects.get(id=res.data['id'])
        specialization_serializer = serializers.SpecializationSerializer(
            specialization,
            context=serializer_context
        )
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        test_all_model_attributes(self, payload, specialization, specialization_serializer)
    def test_partial_update_specialization(self):
        """test partially updating a specialization's detail using patch"""
        specialization = sample_specialization(department=self.department, max_level=self.level)
        payload = {
            'description': 'some description text',
        }
        url = specialization_detail_url(specialization.id)
        res = self.client.patch(url, payload)
        specialization.refresh_from_db()
        specialization_serializer = serializers.SpecializationSerializer(
            specialization,
            context=serializer_context
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        test_all_model_attributes(self, payload, specialization, specialization_serializer)
    def test_full_update_specialization(self):
        """test updating a specialization's detail using put"""
        specialization = sample_specialization(department=self.department, max_level=self.level)
        department = sample_department(faculty=self.faculty, name='Department 3')
        department_serializer = serializers.DepartmentSerializer(department, context=serializer_context)
        level_serializer = serializers.LevelSerializer(self.level, context=serializer_context)
        payload = {
            'department': department_serializer.data['url'],
            'name': 'Specialization 3',
            'max_level': level_serializer.data['url'],
            'description': 'some description text',
        }
        url = specialization_detail_url(specialization.id)
        res = self.client.put(url, payload)
        specialization.refresh_from_db()
        specialization_serializer = serializers.SpecializationSerializer(
            specialization,
            context=serializer_context
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        test_all_model_attributes(self, payload, specialization, specialization_serializer)
|
"""
Test the color calibration
With scatter3d set to True, the program draws the 3d scatter plots of the colors of the calibrated images. One of the
calibrated images is simply a matrix of random RGB values. The 3d scatter plotting visualizes whether the colors stay in
the safe space (the [0 1]^3 space) or blow out (out of the [0 1]^3 space)
"""
from color_calibr_ccm.color_calibr_ccm import color_calibration
import matplotlib.pyplot as plt
from color_calibr_ccm.utils import *
from itertools import product, combinations
import cv2
import numpy as np
# Draw the 3D scatter of the colors to visualize the color blow-out (outside of the [0 1]^3 space)
scatter3d = False
# Source images to be calibrated, and the reference they should match.
src = ["imgs/color_src_s.jpg"]
src_color_space = "sRGB"
src_is_linear = False
ref = ["imgs/color_ref_s.jpg"]
ref_color_space = "sRGB"
ref_is_linear = False
# OpenCV loads BGR; convert to RGB for the calibration pipeline.
src_imgs = []
for img_url in src:
    img = cv2.cvtColor(cv2.imread(img_url), cv2.COLOR_BGR2RGB)
    src_imgs.append(img)
ref_imgs = []
for img_url in ref:
    img = cv2.cvtColor(cv2.imread(img_url), cv2.COLOR_BGR2RGB)
    ref_imgs.append(img)
# Set distance to test calibration with loss:
#   distance = "rgb": loss is the MSE of RGB values
#   distance = "rgbl": loss is the MSE of linear RGB values
#   distance = "de00": loss is the CIEDE2000 error (in CIELAB color space)
distance = "de00"
ccm, error, calibr_imgs = color_calibration(
    src_imgs, src_color_space, src_is_linear, ref_imgs, ref_color_space, ref_is_linear, distance=distance, verbose=False
)
print(f"ccm: {ccm}")
print(f"error: {error}")
for img in calibr_imgs:
    # Convert RGB to BGR
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    # NOTE(review): every calibrated image is written to the same path, so
    # with more than one source only the last survives -- confirm intended.
    cv2.imwrite("imgs/color_calibr_" + distance + ".jpg", img)
if scatter3d:
    # Generate an image with random pixels and check if it oversaturates
    n_pixels = 1000
    img_rand = np.random.randint(256, size=(n_pixels, 3)) / 255

    def scatterRGB3D(img, label):
        """Scatter-plot the RGB triplets of *img* inside the unit color cube.

        Points outside [0, 1]^3 (blown-out colors) are drawn in black.
        NOTE(review): *label* is currently unused -- presumably intended as a
        figure title; confirm before wiring it up.
        """
        fig = plt.figure()
        ax = fig.add_subplot(projection="3d")
        ax.set_xlabel("Red")
        ax.set_ylabel("Green")
        # BUG FIX: the z axis holds the blue channel, not yellow.
        ax.set_zlabel("Blue")
        for rgb in img:
            if np.all(np.logical_and(rgb >= 0, rgb <= 1)):
                color = (rgb[0], rgb[1], rgb[2])
            else:
                # Out-of-gamut point: draw it in black so it stands out.
                color = (0, 0, 0)
            ax.scatter3D(rgb[0], rgb[1], rgb[2], marker="o", color=color)
        # draw the [0, 1]^3 cube wireframe
        r = [0, 1]
        for s, e in combinations(np.array(list(product(r, r, r))), 2):
            if np.sum(np.abs(s - e)) == r[1] - r[0]:
                ax.plot3D(*zip(s, e), color="gray")

    src_img = src_imgs[0]
    ref_img = ref_imgs[0]
    calibr_img = calibr_imgs[0]
    scatterRGB3D(calibr_img / 255, "calibr")
    scatterRGB3D(img_rand, "img_rand")
    plt.show()
|
from django.db import models
from django.utils import six
from django.core.exceptions import ImproperlyConfigured
import psycopg2.extras
if hasattr(psycopg2.extras, 'Range'):
from psycopg2.extras import (
NumericRange,
DateRange,
DateTimeRange,
DateTimeTZRange,
)
class AbstractRangeField(six.with_metaclass(models.SubfieldBase,
                         models.Field)):
    """Base model field for PostgreSQL range columns.

    Subclasses must supply ``range_type`` (the PostgreSQL column type name)
    and ``range_class`` (the psycopg2 Range class used to adapt values).
    NOTE(review): relies on ``django.utils.six`` and ``SubfieldBase``, i.e.
    legacy Django -- confirm the target Django version before porting.
    """
    def __init__(self, verbose_name=None, name=None,
                 range_type=None, range_class=None, **kwargs):
        if range_type is None:
            raise ImproperlyConfigured(
                "RangeField requires range_type to be specified.")
        if range_class is None:
            raise ImproperlyConfigured(
                "RangeField requires a range_class to be specified.")
        self.range_type = range_type
        self.range_class = range_class
        models.Field.__init__(self, verbose_name, name, **kwargs)
    def db_type(self, connection):
        # The column type is exactly the configured PostgreSQL range type.
        return self.range_type
    def get_prep_value(self, value):
        # `value` should be a three-tuple: (lower, upper, bounds).
        # NOTE(review): a None value raises TypeError on len() here, so
        # nullable columns are not handled -- confirm whether null=True is
        # ever used with these fields.
        if not len(value) == 3:
            raise ValueError("range must be a three-tuple")
        return self.range_class(value[0], value[1], bounds=value[2])
class IntegerRangeField(AbstractRangeField):
    """Range of 32-bit integers, stored in a PostgreSQL ``int4range`` column."""
    def __init__(self, *args, **kwargs):
        # BUG FIX: the db type was misspelled 'int4rage', which PostgreSQL
        # rejects; the correct range type name is 'int4range'.
        kwargs['range_type'] = 'int4range'
        kwargs['range_class'] = NumericRange
        super(IntegerRangeField, self).__init__(*args, **kwargs)
class BigIntegerRangeField(AbstractRangeField):
    """Range of 64-bit integers, stored as PostgreSQL ``int8range``."""
    def __init__(self, *args, **kwargs):
        kwargs.update(range_type='int8range', range_class=NumericRange)
        super(BigIntegerRangeField, self).__init__(*args, **kwargs)
class NumericRangeField(AbstractRangeField):
    """Range of arbitrary-precision numbers, stored as PostgreSQL ``numrange``."""
    def __init__(self, *args, **kwargs):
        kwargs.update(range_type='numrange', range_class=NumericRange)
        super(NumericRangeField, self).__init__(*args, **kwargs)
class TimestampRangeField(AbstractRangeField):
    """Range of naive timestamps, stored as PostgreSQL ``tsrange``."""
    def __init__(self, *args, **kwargs):
        kwargs.update(range_type='tsrange', range_class=DateTimeRange)
        super(TimestampRangeField, self).__init__(*args, **kwargs)
class TimstampTZRangeField(AbstractRangeField):
    """Range of timezone-aware timestamps, stored as PostgreSQL ``tstzrange``.

    NOTE: the class name is missing an 'e' ('Timstamp'); kept as-is because
    renaming would break existing imports and migrations.
    """
    def __init__(self, *args, **kwargs):
        kwargs.update(range_type='tstzrange', range_class=DateTimeTZRange)
        super(TimstampTZRangeField, self).__init__(*args, **kwargs)
class DateRangeField(AbstractRangeField):
    """Range of calendar dates, stored as PostgreSQL ``daterange``."""
    def __init__(self, *args, **kwargs):
        kwargs.update(range_type='daterange', range_class=DateRange)
        super(DateRangeField, self).__init__(*args, **kwargs)
|
from setuptools import setup
from shutil import copy , move
import platform
# BUG FIX: slicing the version string ([0:3]) yields '3.1' for Python 3.10+;
# join the major.minor components instead.
python_ver = ".".join(platform.python_version_tuple()[:2])
username = platform.node().split("-")[0]
try:
    # "Install" by moving the package directory into the user's site-packages.
    move("colorRandom", dst=f'/home/{username}/.local/lib/python{python_ver}/site-packages')
except Exception as exc:
    # Report the actual failure instead of swallowing it silently.
    print('error to set setup!')
    print(exc)
|
import aiohttp
import asyncio
import time
async def hit_api(session, tar):
    """GET ``tar['url']`` with the shared session, then simulate processing lag.

    ``tar`` is a dict with 'url', 'lagsim' (seconds of simulated work) and
    'metadata' keys -- TODO confirm against the lambda event schema.
    """
    async with session.get(tar['url']) as response:
        await response.text()
        await asyncio.sleep(tar['lagsim']) #simulate some data processing lag here
        print (f"{tar['metadata']} |||| {tar['url']} responded at {response.headers.get('Date')}.... data processing finished at {time.strftime('%X')}")
async def hit_api_group(sites):
    """
    Open one shared aiohttp session and fetch every target in *sites*.
    Exceptions are returned in the result list instead of being raised.
    """
    async with aiohttp.ClientSession() as session:
        requests = [hit_api(session, site) for site in sites]
        return await asyncio.gather(*requests, return_exceptions=True)
async def main(_targets):
    """
    adapted from this example: https://github.com/geeogi/async-python-lambda-template/tree/master
    aiohttp: https://docs.aiohttp.org/en/stable/
    asyncio: https://docs.python.org/3/library/asyncio.html
    for each target_group, make its own task with accompaying sessions, so that the # of sessions is dictated by number of target groups in lambda event
    """
    print(f"started at {time.strftime('%X')}")
    results= [asyncio.create_task(hit_api_group(target)) for target in _targets]
    await asyncio.gather(*results)
    # NOTE: this prints the Task objects (their reprs), not their results.
    print(results)
    print(f"finished at {time.strftime('%X')}")
def lambda_handler(event, context):
    """AWS Lambda entry point: run the async crawl over the event's target groups."""
    target_groups = event["target_groups"]
    asyncio.run(main(target_groups))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
import tensorflow as tf
from invoke import run, exceptions
# Module-wide logger for the biomedbert CLI helpers.
log = logging.getLogger('biomedbert')
log.setLevel(logging.INFO)
def fine_tune_bioasq(model_type: str, bucket_name: str, train_file: str, predict_file: str, model_dir: str,
                     tpu_name: str, tpu_zone: str, gcp_project: str, tpu_cores: int, squad_folder: str):
    """Fine-tune a BERT model on a BioASQ QA dataset via ``biobert/run_qa.py``.

    model_type selects the config file ('base' or 'large'); anything else
    aborts with exit(1). tpu_name may be None to run without a TPU, and
    tpu_cores defaults to 8. All paths resolve inside gs://<bucket_name>.
    """
    use_tpu = True
    config = 'large_bert_config.json'
    max_seq_length = 128  # 384
    num_tpu_cores = 8
    if tpu_cores is not None:
        num_tpu_cores = int(tpu_cores)
    if tpu_name is None:
        # 'false' is what the downstream script expects for "no TPU".
        tpu_name = 'false'
        use_tpu = False
    if model_type == 'base':
        # bert base
        config = 'base_bert_config.json'
    elif model_type == 'large':
        # bert large
        config = 'large_bert_config.json'
    else:
        log.info('No config file')
        sys.exit(1)
    # Resume from the newest checkpoint of a previous SQuAD fine-tune run.
    init_checkpoint = tf.train.latest_checkpoint('gs://{}/{}/{}'.format(bucket_name, model_dir, squad_folder))
    vocab_file = 'gs://{}/{}/vocab.txt'.format(bucket_name, model_dir)
    bert_config_file = 'gs://{}/{}/{}'.format(bucket_name, model_dir, config)
    output_dir = 'gs://{}/{}/BioASQ_outputs/{}/{}'.format(bucket_name, model_dir, squad_folder, predict_file.split('.')[0])
    # Rebind the file-name parameters to their full GCS URIs.
    train_file = 'gs://{}/datasets/QA/BioASQ/{}'.format(bucket_name, train_file)
    predict_file = 'gs://{}/datasets/QA/BioASQ/{}'.format(bucket_name, predict_file)
    try:
        run('python3 biobert/run_qa.py --vocab_file={} '
            '--bert_config_file={} --predict_batch_size=128 '
            '--init_checkpoint={} --do_train=true --do_predict=true '
            '--max_seq_length={} --train_batch_size=32 --learning_rate=5e-6 '
            '--doc_stride=128 --num_train_epochs=5.0 --do_lower_case=False '
            '--train_file={} --predict_file={} '
            '--output_dir={}/ --num_tpu_cores={} --use_tpu={} '
            '--tpu_name={} --tpu_zone={} --gcp_project={}'.format(
                vocab_file, bert_config_file, init_checkpoint, max_seq_length,
                train_file, predict_file, output_dir, num_tpu_cores,
                use_tpu, tpu_name, tpu_zone, gcp_project))
    except exceptions.UnexpectedExit:
        print('Cannot fine tune BioASQ - {}'.format(train_file))
def evaluate_bioasq(bucket_name: str, model_dir: str, predict_file: str, eval_file: str, squad_folder: str):
    """Convert predictions to BioASQ JSON format and run the official evaluator.

    Downloads nbest_predictions.json from gs://<bucket_name>, transforms it,
    then runs the BioASQ Evaluation-Measures Java tool. Exits with status 1
    on any shell-command failure.
    """
    # convert results to BioASQ JSON format
    try:
        output_dir = 'BioASQ_outputs/{}/{}'.format(squad_folder, predict_file.split('.')[0])
        if not os.path.exists(output_dir):
            run('mkdir -p {}'.format(output_dir))
        run('gsutil cp gs://{}/{}/{}/nbest_predictions.json {}'.format(
            bucket_name, model_dir, output_dir, output_dir))
        run('python3 biobert/biocodes/transform_nbset2bioasqform.py '
            '--nbest_path={}/nbest_predictions.json '
            '--output_path={}'.format(output_dir, output_dir))
    except exceptions.UnexpectedExit:
        print('Cannot convert results to BioASQ JSON format')
        sys.exit(1)
    # run BioAsq evaluation script
    try:
        if not os.path.exists('Evaluation-Measures'):
            run('git clone https://github.com/BioASQ/Evaluation-Measures.git')
        # Consistency fix: use the caller-supplied bucket instead of the
        # hard-coded 'ekaba-assets' bucket used elsewhere in this module.
        run('gsutil cp gs://{}/datasets/QA/BioASQ/{} {}'.format(
            bucket_name, eval_file, output_dir))
        # NOTE: a previous run('cd Evaluation-Measures') was removed -- each
        # `run` call spawns its own shell, so the cd never affected the java
        # invocation below (whose paths are relative to the original cwd).
        run('java -Xmx10G '
            '-cp $CLASSPATH:Evaluation-Measures/flat/BioASQEvaluation/dist/BioASQEvaluation.jar '
            'evaluation.EvaluatorTask1b -phaseB -e 5 {}/{} '
            '{}/BioASQform_BioASQ-answer.json'.format(output_dir, eval_file, output_dir))
    except exceptions.UnexpectedExit:
        print('Cannot evaluate BioASQ')
        sys.exit(1)
|
"""IBP inmate search utility.
The :py:mod:`pymates` module provides two functions for searching for Texas inmates:
* :py:func:`query_by_inmate_id`
* :py:func:`query_by_name`
Because Texas inmates can be housed in both Federal and state-level institutions,
these functions must search for inmates through the TDCJ and FBOP websites.
The driving utility of this module is that it provides a common interface to
both systems: Search parameters are given, both jurisdictions are searched, and
matching Federal and state-level inmates are returned back. All of this is
done without requiring the user to be concerned with the details.
"""
from .base import query_by_inmate_id, query_by_name
__all__ = ["query_by_inmate_id", "query_by_name"]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-06-05 20:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: set max_length=256 on event_info's name and incharge fields."""
    dependencies = [
        ('gymkhana', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event_info',
            name='event_name',
            field=models.CharField(max_length=256),
        ),
        migrations.AlterField(
            model_name='event_info',
            name='incharge',
            field=models.CharField(max_length=256),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: sixteen
Description :
Author : joe
date: 2019-08-14
-------------------------------------------------
Change Activity:
2019-08-14:
-------------------------------------------------
"""
import os
# -----------------------------------------------------------------------------
# NOTE(review): this tutorial file is written in **Python 2** (print
# statements, raw_input, file.softspace). It is a SyntaxError under Python 3
# and must be executed with a Python 2 interpreter.
# The bare triple-quoted strings below are no-op statements used as prose.
# -----------------------------------------------------------------------------
'''
Python 文件I/O
本章只讲述所有基本的的I/O函数,更多函数请参考Python标准文档。
打印到屏幕
最简单的输出方法是用print语句,你可以给它传递零个或多个用逗号隔开的表达式
'''
print "Python 是一个非常棒的语言,不是吗?"
'''
读取键盘输入
Python提供了两个内置函数从标准输入读入一行文本,默认的标准输入是键盘
raw_input函数
raw_input([prompt]) 函数从标准输入读取一个行,并返回一个字符串(去掉结尾的换行符)
'''
# str = raw_input("请输入:")
# print "你输入的内容是: ", str
'''
input函数
input([prompt]) 函数和 raw_input([prompt]) 函数基本类似,但是 input 可以接收一个Python表达式作为输入,并将运算结果返回
'''
# str = input("请输入:")
# print "你输入的内容是: ", str
'''
打开和关闭文件
现在,您已经可以向标准输入和输出进行读写。现在,来看看怎么读写实际的数据文件。
Python 提供了必要的函数和方法进行默认情况下的文件基本操作。你可以用 file 对象做大部分的文件操作。
open 函数
你必须先用Python内置的open()函数打开一个文件,创建一个file对象,相关的方法才可以调用它进行读写
语法:
file object = open(file_name [, access_mode][, buffering])
各个参数的细节如下:
file_name:file_name变量是一个包含了你要访问的文件名称的字符串值。
access_mode:access_mode决定了打开文件的模式:只读,写入,追加等。所有可取值见如下的完全列表。这个参数是非强制的,默认文件访问模式为只读(r)。
buffering:如果buffering的值被设为0,就不会有寄存。
如果buffering的值取1,访问文件时会寄存行。
如果将buffering的值设为大于1的整数,表明了这就是的寄存区的缓冲大小。
如果取负值,寄存区的缓冲大小则为系统默认
access_mode
模式 描述
t 文本模式 (默认)。
x 写模式,新建一个文件,如果该文件已存在则会报错。
b 二进制模式。
+ 打开一个文件进行更新(可读可写)。
U 通用换行模式(不推荐)。
r 以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。
rb 以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。一般用于非文本文件如图片等。
r+ 打开一个文件用于读写。文件指针将会放在文件的开头。
rb+ 以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。一般用于非文本文件如图片等。
w 打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
wb 以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。一般用于非文本文件如图片等。
w+ 打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
wb+ 以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。一般用于非文本文件如图片等。
a 打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。
ab 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。
a+ 打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。
ab+ 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。
File对象的属性
一个文件被打开后,你有一个file对象,你可以得到有关该文件的各种信息。
以下是和file对象相关的所有属性的列表:
属性 描述
file.closed 返回true如果文件已被关闭,否则返回false。
file.mode 返回被打开文件的访问模式。
file.name 返回文件的名称。
file.softspace 如果用print输出后,必须跟一个空格符,则返回false。否则返回true。
'''
# Demonstrate the file-object attributes (softspace is Python 2 only).
fo = open("foo.txt", "w")
print "文件名: ", fo.name
print "是否已关闭 : ", fo.closed
print "访问模式 : ", fo.mode
print "末尾是否强制加空格 : ", fo.softspace
'''
close()方法
File 对象的 close()方法刷新缓冲区里任何还没写入的信息,并关闭该文件,这之后便不能再进行写入。
当一个文件对象的引用被重新指定给另一个文件时,Python 会关闭之前的文件。
用 close()方法关闭文件是一个很好的习惯
'''
fo = open("foo.txt", "w")
print "文件名: ", fo.name
# Close the opened file
fo.close()
'''
读写文件:
file对象提供了一系列方法,能让我们的文件访问更轻松。
来看看如何使用read()和write()方法来读取和写入文件。
write()方法
write()方法可将任何字符串写入一个打开的文件。
需要重点注意的是,Python字符串可以是二进制数据,而不是仅仅是文字。
write()方法不会在字符串的结尾添加换行符('\n'):
'''
fo = open("foo.txt", "w")
fo.write("www.runoob.com!\nVery good site!\n")
# Close the opened file
fo.close()
'''
read()方法
read()方法从一个打开的文件中读取一个字符串。
需要重点注意的是,Python字符串可以是二进制数据,而不是仅仅是文字。
语法:
fileObject.read([count])
在这里,被传递的参数是要从已打开文件中读取的字节计数。
该方法从文件的开头开始读入,如果没有传入count,它会尝试尽可能多地读取更多的内容,很可能是直到文件的末尾。
'''
fo = open("foo.txt", "r+")
str = fo.read()
print "读取的字符串是 : ", str
# Close the opened file
fo.close()
'''
文件定位
tell()方法告诉你文件内的当前位置, 换句话说,下一次的读写会发生在文件开头这么多字节之后。
seek(offset [,from])方法改变当前文件的位置。
Offset变量表示要移动的字节数。From变量指定开始移动字节的参考位置。
如果from被设为0,这意味着将文件的开头作为移动字节的参考位置。
如果设为1,则使用当前的位置作为参考位置。
如果它被设为2,那么该文件的末尾将作为参考位置
'''
fo = open("foo.txt", "r+")
str = fo.read(10)
print "读取的字符串是 : ", str
# Query the current position within the file
position = fo.tell()
print "当前文件位置 : ", position
# Reposition the pointer back to the beginning of the file
position = fo.seek(0, 0)
str = fo.read(10)
print "重新读取字符串 : ", str
# Close the opened file
fo.close()
'''
重命名和删除文件
Python的os模块提供了帮你执行文件处理操作的方法,比如重命名和删除文件。
要使用这个模块,你必须先导入它,然后才可以调用相关的各种功能。
rename()方法:
rename()方法需要两个参数,当前的文件名和新文件名
'''
# Rename foo.txt to test2.txt
os.rename( "foo.txt", "test2.txt" )
'''
remove()方法
你可以用remove()方法删除文件,需要提供要删除的文件名作为参数。
'''
# Delete the existing file test2.txt
os.remove("test2.txt")
'''
Python里的目录:
所有文件都包含在各个不同的目录下,不过Python也能轻松处理。
os模块有许多方法能帮你创建,删除和更改目录。
mkdir()方法
可以使用os模块的mkdir()方法在当前目录下创建新的目录们。
你需要提供一个包含了要创建的目录名称的参数
'''
# Create directory "test"
# os.mkdir("test")
'''
chdir()方法
可以用chdir()方法来改变当前的目录。
chdir()方法需要的一个参数是你想设成当前目录的目录名称
'''
# Change the current directory to "/home/newdir"
# os.chdir("/home/newdir")
# Print the current working directory
print os.getcwd()
'''
rmdir()方法
rmdir()方法删除目录,目录名称以参数传递。
在删除这个目录之前,它的所有内容应该先被清除
'''
# Remove the "test" directory (NOTE: never created above since mkdir is
# commented out -- this raises OSError if the directory does not exist)
os.rmdir( "test" )
if __name__ == '__main__':
    pass
from os.path import join, isfile
import copy
from urllib.parse import urlparse
import urllib.request
import subprocess
import click
from google.protobuf.descriptor import FieldDescriptor
import boto3
import botocore
from rastervision.protos.chain_workflow_pb2 import ChainWorkflowConfig
from rastervision.protos.compute_raster_stats_pb2 import (
ComputeRasterStatsConfig)
from rastervision.protos.make_training_chips_pb2 import (
MakeTrainingChipsConfig)
from rastervision.protos.train_pb2 import TrainConfig
from rastervision.protos.predict_pb2 import PredictConfig
from rastervision.protos.eval_pb2 import EvalConfig
from rastervision.protos.label_store_pb2 import (
LabelStore as LabelStoreConfig, ObjectDetectionGeoJSONFile as
ObjectDetectionGeoJSONFileConfig, ClassificationGeoJSONFile as
ClassificationGeoJSONFileConfig, SegmentationRasterFile as
SegmentationRasterFileConfig)
from rastervision.utils.files import (load_json_config, save_json_config,
file_to_str, str_to_file)
from rastervision.utils.batch import _batch_submit
from rastervision import run
# Names of the chain-workflow tasks, in execution order.
COMPUTE_RASTER_STATS = 'compute_raster_stats'
MAKE_TRAINING_CHIPS = 'make_training_chips'
TRAIN = 'train'
PREDICT = 'predict'
EVAL = 'eval'
ALL_TASKS = [COMPUTE_RASTER_STATS, MAKE_TRAINING_CHIPS, TRAIN, PREDICT, EVAL]
# (protobuf message full name, field name) pairs whose URI values should be
# checked for existence before the workflow runs (see is_config_valid).
validated_uri_fields = set(
    [('rv.protos.ObjectDetectionGeoJSONFile',
      'uri'), ('rv.protos.ClassificationGeoJSONFile', 'uri'),
     ('rv.protos.GeoTiffFiles', 'uris'), ('rv.protos.ImageFile', 'uri'),
     ('rv.protos.TrainConfig.Options',
      'backend_config_uri'), ('rv.protos.TrainConfig.Options',
                              'pretrained_model_uri')])
# Shared S3 resource used by is_uri_valid for s3:// URIs.
s3 = boto3.resource('s3')
def make_command(command, config_uri):
    """Build the shell command line that runs one rastervision task."""
    template = 'python -m rastervision.run {} {}'
    return template.format(command, config_uri)
class PathGenerator(object):
    """Derives the URI of every artifact of a chain-workflow run.

    The five stage directories nest inside each other
    (raw-dataset > dataset > model > prediction > eval), and every stage
    gets a config.json URI and an output directory URI.
    """

    def __init__(self, uri_map, raw_dataset_key, dataset_key, model_key,
                 prediction_key, eval_key):
        rv_root = uri_map['rv_root']
        self.raw_dataset_uri = join(rv_root, 'rv-output', 'raw-datasets',
                                    raw_dataset_key)
        self.dataset_uri = join(self.raw_dataset_uri, 'datasets', dataset_key)
        self.model_uri = join(self.dataset_uri, 'models', model_key)
        self.prediction_uri = join(self.model_uri, 'predictions',
                                   prediction_key)
        self.eval_uri = join(self.prediction_uri, 'evals', eval_key)

        # Config and output URIs for each stage, derived uniformly.
        stage_uris = (self.raw_dataset_uri, self.dataset_uri, self.model_uri,
                      self.prediction_uri, self.eval_uri)
        (self.compute_raster_stats_config_uri,
         self.make_training_chips_config_uri,
         self.train_config_uri,
         self.predict_config_uri,
         self.eval_config_uri) = [self.get_config_uri(u) for u in stage_uris]
        (self.compute_raster_stats_output_uri,
         self.make_training_chips_output_uri,
         self.train_output_uri,
         self.prediction_output_uri,
         self.eval_output_uri) = [self.get_output_uri(u) for u in stage_uris]

    def get_config_uri(self, prefix_uri):
        """URI of the JSON config file stored under *prefix_uri*."""
        return join(prefix_uri, 'config.json')

    def get_output_uri(self, prefix_uri):
        """URI of the output directory stored under *prefix_uri*."""
        return join(prefix_uri, 'output')
def is_branch_valid(branch):
    """Return True iff *branch* exists on the raster-vision GitHub remote."""
    cmd = [
        'git', 'ls-remote', '--heads',
        'https://github.com/azavea/raster-vision.git', branch
    ]
    # ls-remote prints nothing when the branch does not exist.
    heads = subprocess.run(cmd, stdout=subprocess.PIPE).stdout
    if heads:
        return True
    print('Error: remote branch {} does not exist'.format(branch))
    return False
def is_uri_valid(uri):
    """Check that *uri* points at an existing resource.

    Supports s3://, http(s):// and local file URIs. Prints a diagnostic and
    returns False when the resource cannot be found; returns True otherwise.
    """
    parsed_uri = urlparse(uri)
    if parsed_uri.scheme == 's3':
        try:
            s3.Object(parsed_uri.netloc, parsed_uri.path[1:]).load()
        except botocore.exceptions.ClientError as e:
            # NOTE(review): non-404 errors (e.g. 403) fall through and the
            # URI is treated as valid -- confirm this is intentional.
            if e.response['Error']['Code'] == "404":
                print('Error: URI cannot be found: {}'.format(uri))
                print(e)
                return False
    elif parsed_uri.scheme in ['http', 'https']:
        # BUG FIX: urlopen itself raises (HTTPError/URLError) for missing or
        # unreachable resources, so it must be inside the try block too;
        # previously a bad http(s) URI crashed instead of returning False.
        try:
            with urllib.request.urlopen(uri) as response:
                response.read(1)
        except Exception:
            print('Error: URI cannot be found: {}'.format(uri))
            return False
    else:
        if not isfile(uri):
            print('Error: URI cannot be found: {}'.format(uri))
            return False
    return True
def is_validated_uri_field(message_type, field_name):
    """True when this (message type, field) pair is registered for URI validation."""
    key = (message_type, field_name)
    return key in validated_uri_fields
def is_config_valid(config):
    """Recursively validate every registered URI field of a protobuf message.

    Walks the message tree, checking each (message type, field) pair listed
    in ``validated_uri_fields`` with ``is_uri_valid``. Returns True only if
    every registered URI exists; diagnostics are printed along the way.
    """
    # If config is primitive, do nothing.
    if not hasattr(config, 'ListFields'):
        return True
    message_type = config.DESCRIPTOR.full_name
    is_valid = True
    for field_desc, field_val in config.ListFields():
        field_name = field_desc.name
        if is_validated_uri_field(message_type, field_name):
            # Singular vs. repeated URI fields are distinguished by suffix.
            if field_name.endswith('uri'):
                is_valid = is_uri_valid(field_val) and is_valid
            if field_name.endswith('uris'):
                for uri in field_val:
                    is_valid = is_uri_valid(uri) and is_valid
        # Recurse.
        if field_desc.label == FieldDescriptor.LABEL_REPEATED:
            for field_val_item in field_val:
                is_valid = \
                    is_config_valid(field_val_item) and is_valid
        else:
            is_valid = is_config_valid(field_val) and is_valid
    return is_valid
def apply_uri_map(config, uri_map):
    """Do parameter substitution on any URI fields.

    Returns a deep copy of *config* in which every field whose name ends in
    ``uri``/``uris`` has had ``str.format(**uri_map)`` applied; *config*
    itself is left untouched.
    """

    def _apply_uri_map(config):
        # If config is primitive, do nothing.
        if not hasattr(config, 'ListFields'):
            return
        # For each field in message, update its value if the name ends with
        # uri or uris.
        for field_desc, field_val in config.ListFields():
            field_name = field_desc.name
            if field_name.endswith('uri'):
                new_uri = field_val.format(**uri_map)
                setattr(config, field_name, new_uri)
            if field_name.endswith('uris'):
                for ind, uri in enumerate(field_val):
                    new_uri = uri.format(**uri_map)
                    field_val[ind] = new_uri
            # Recurse.
            if field_desc.label == FieldDescriptor.LABEL_REPEATED:
                for field_val_item in field_val:
                    _apply_uri_map(field_val_item)
            else:
                _apply_uri_map(field_val)

    # Use copy.deepcopy instead of calling the __deepcopy__ dunder directly:
    # it is the idiomatic spelling and also works for primitives/None.
    new_config = copy.deepcopy(config)
    _apply_uri_map(new_config)
    return new_config
class ChainWorkflow(object):
    """Drives the chained tasks (compute_raster_stats -> make_training_chips
    -> train -> predict -> eval) described by a workflow config file,
    generating each task's config and running locally or on AWS Batch."""
    def __init__(self, workflow_uri, remote=False):
        """Load, substitute, and validate the workflow config.

        workflow_uri: URI of a ChainWorkflowConfig JSON file.
        remote: if True use the workflow's remote_uri_map, else the
            local_uri_map, for placeholder substitution.
        Exits the process when any referenced URI is invalid.
        """
        self.workflow = load_json_config(workflow_uri, ChainWorkflowConfig())
        self.uri_map = (self.workflow.remote_uri_map
                        if remote else self.workflow.local_uri_map)
        is_valid = is_config_valid(apply_uri_map(self.workflow, self.uri_map))
        if not is_valid:
            exit()
        self.path_generator = PathGenerator(
            self.uri_map, self.workflow.raw_dataset_key,
            self.workflow.dataset_key, self.workflow.model_key,
            self.workflow.prediction_key, self.workflow.eval_key)
        self.update_raster_transformer()
        self.update_scenes()
    def update_raster_transformer(self):
        """Point the shared raster transformer at the stats.json that the
        compute_raster_stats task will produce."""
        stats_uri = join(self.path_generator.compute_raster_stats_output_uri,
                         'stats.json')
        self.workflow.raster_transformer.stats_uri = stats_uri
    def update_scenes(self):
        """Fill in defaults on every scene: generated ids, the shared
        raster transformer, and (for test/predict scenes) a generated
        prediction label store."""
        for idx, scene in enumerate(self.workflow.train_scenes):
            if len(scene.id) < 1:
                scene.id = 'train-{}'.format(idx)
            # Set raster_tranformer for raster_sources
            scene.raster_source.raster_transformer.MergeFrom(
                self.workflow.raster_transformer)
        for idx, scene in enumerate(self.workflow.test_scenes):
            if len(scene.id) < 1:
                scene.id = 'eval-{}'.format(idx)
            scene.raster_source.raster_transformer.MergeFrom(
                self.workflow.raster_transformer)
            # Set prediction_label_store from generated URI.
            scene.prediction_label_store.MergeFrom(
                self.make_prediction_label_store(scene))
        for idx, scene in enumerate(self.workflow.predict_scenes):
            if len(scene.id) < 1:
                scene.id = 'predict-{}'.format(idx)
            scene.raster_source.raster_transformer.MergeFrom(
                self.workflow.raster_transformer)
            # Set prediction_label_store from generated URI.
            scene.prediction_label_store.MergeFrom(
                self.make_prediction_label_store(scene))
    def make_prediction_label_store(self, scene):
        """Build a LabelStoreConfig whose output lives under the
        prediction dir, mirroring the scene's ground-truth store type.

        Raises ValueError for unrecognized label store types.
        """
        label_store = scene.ground_truth_label_store
        label_store_type = label_store.WhichOneof('label_store_type')
        prediction_uri = join(self.path_generator.prediction_output_uri,
                              '{}.json'.format(scene.id))
        prediction_raster = join(self.path_generator.prediction_output_uri,
                                 '{}.tif'.format(scene.id))
        if label_store_type == 'object_detection_geojson_file':
            geojson_file = ObjectDetectionGeoJSONFileConfig(uri=prediction_uri)
            return LabelStoreConfig(object_detection_geojson_file=geojson_file)
        elif label_store_type == 'classification_geojson_file':
            geojson_file = ClassificationGeoJSONFileConfig(uri=prediction_uri)
            return LabelStoreConfig(classification_geojson_file=geojson_file)
        elif label_store_type == 'segmentation_raster_file':
            raster_file = SegmentationRasterFileConfig(sink=prediction_raster)
            return LabelStoreConfig(segmentation_raster_file=raster_file)
        else:
            raise ValueError(
                'Not sure how to generate label source config for type {}'
                .format(label_store_type))
    def get_compute_raster_stats_config(self):
        """Build the config for the compute_raster_stats task over all
        scenes (train, test, predict)."""
        config = ComputeRasterStatsConfig()
        scenes = copy.deepcopy(self.workflow.train_scenes)
        scenes.extend(self.workflow.test_scenes)
        scenes.extend(self.workflow.predict_scenes)
        for scene in scenes:
            # Set the raster_transformer so its fields are null since
            # compute_raster_stats will generate stats_uri.
            raster_source = copy.deepcopy(scene.raster_source)
            raster_source.raster_transformer.stats_uri = ''
            config.raster_sources.extend([raster_source])
        config.stats_uri = self.workflow.raster_transformer.stats_uri
        config = apply_uri_map(config, self.uri_map)
        return config
    def get_make_training_chips_config(self):
        """Build the config for the make_training_chips task."""
        config = MakeTrainingChipsConfig()
        config.train_scenes.MergeFrom(self.workflow.train_scenes)
        config.validation_scenes.MergeFrom(self.workflow.test_scenes)
        config.machine_learning.MergeFrom(self.workflow.machine_learning)
        config.options.MergeFrom(self.workflow.make_training_chips_options)
        config.options.chip_size = self.workflow.chip_size
        config.options.debug = self.workflow.debug
        config.options.output_uri = \
            self.path_generator.make_training_chips_output_uri
        config = apply_uri_map(config, self.uri_map)
        return config
    def get_train_config(self):
        """Build the config for the train task, copying the backend
        config next to the model output."""
        config = TrainConfig()
        config.machine_learning.MergeFrom(self.workflow.machine_learning)
        config.options.MergeFrom(self.workflow.train_options)
        config.options.training_data_uri = \
            self.path_generator.make_training_chips_output_uri
        config.options.output_uri = \
            self.path_generator.train_output_uri
        # Copy backend config so that it is nested under model_uri. This way,
        # all config files and corresponding output of RV will be located next
        # to each other in the file system.
        backend_config_copy_uri = join(self.path_generator.model_uri,
                                       'backend.config')
        backend_config_uri = config.options.backend_config_uri.format(
            **self.uri_map)
        backend_config_str = file_to_str(backend_config_uri)
        str_to_file(backend_config_str, backend_config_copy_uri)
        config.options.backend_config_uri = backend_config_copy_uri
        config = apply_uri_map(config, self.uri_map)
        return config
    def get_predict_config(self):
        """Build the config for the predict task over test + predict
        scenes."""
        config = PredictConfig()
        config.machine_learning.MergeFrom(self.workflow.machine_learning)
        config.scenes.MergeFrom(self.workflow.test_scenes)
        config.scenes.MergeFrom(self.workflow.predict_scenes)
        config.options.MergeFrom(self.workflow.predict_options)
        config.options.debug = self.workflow.debug
        config.options.debug_uri = join(
            self.path_generator.prediction_output_uri, 'debug')
        config.options.chip_size = self.workflow.chip_size
        config.options.model_uri = join(self.path_generator.train_output_uri,
                                        'model')
        config.options.prediction_package_uri = join(
            self.path_generator.prediction_output_uri, 'predict-package.zip')
        config = apply_uri_map(config, self.uri_map)
        return config
    def get_eval_config(self):
        """Build the config for the eval task over the test scenes."""
        config = EvalConfig()
        config.machine_learning.MergeFrom(self.workflow.machine_learning)
        config.scenes.MergeFrom(self.workflow.test_scenes)
        config.options.MergeFrom(self.workflow.eval_options)
        config.options.debug = self.workflow.debug
        config.options.output_uri = join(self.path_generator.eval_output_uri,
                                         'eval.json')
        config = apply_uri_map(config, self.uri_map)
        return config
    def save_configs(self, tasks):
        """Generate and persist the config file for each requested task."""
        print('Generating and saving config files...')
        if COMPUTE_RASTER_STATS in tasks:
            save_json_config(
                self.get_compute_raster_stats_config(),
                self.path_generator.compute_raster_stats_config_uri)
        if MAKE_TRAINING_CHIPS in tasks:
            save_json_config(
                self.get_make_training_chips_config(),
                self.path_generator.make_training_chips_config_uri)
        if TRAIN in tasks:
            save_json_config(self.get_train_config(),
                             self.path_generator.train_config_uri)
        if PREDICT in tasks:
            save_json_config(self.get_predict_config(),
                             self.path_generator.predict_config_uri)
        if EVAL in tasks:
            save_json_config(self.get_eval_config(),
                             self.path_generator.eval_config_uri)
    def remote_run(self, tasks, branch):
        """Submit each requested task as an AWS Batch job; every task
        depends on the previously submitted one. Exits if `branch`
        does not exist on the remote."""
        if not is_branch_valid(branch):
            exit()
        # Run everything in GPU queue since Batch doesn't seem to
        # handle dependencies across different queues.
        parent_job_ids = []
        if COMPUTE_RASTER_STATS in tasks:
            command = make_command(
                COMPUTE_RASTER_STATS,
                self.path_generator.compute_raster_stats_config_uri)
            job_id = _batch_submit(branch, command, attempts=1, gpu=True)
            parent_job_ids = [job_id]
        if MAKE_TRAINING_CHIPS in tasks:
            command = make_command(
                MAKE_TRAINING_CHIPS,
                self.path_generator.make_training_chips_config_uri)
            job_id = _batch_submit(
                branch,
                command,
                attempts=1,
                gpu=True,
                parent_job_ids=parent_job_ids)
            parent_job_ids = [job_id]
        if TRAIN in tasks:
            command = make_command(TRAIN, self.path_generator.train_config_uri)
            job_id = _batch_submit(
                branch,
                command,
                attempts=1,
                gpu=True,
                parent_job_ids=parent_job_ids)
            parent_job_ids = [job_id]
        if PREDICT in tasks:
            command = make_command(PREDICT,
                                   self.path_generator.predict_config_uri)
            job_id = _batch_submit(
                branch,
                command,
                attempts=1,
                gpu=True,
                parent_job_ids=parent_job_ids)
            parent_job_ids = [job_id]
        if EVAL in tasks:
            command = make_command(EVAL, self.path_generator.eval_config_uri)
            job_id = _batch_submit(
                branch,
                command,
                attempts=1,
                gpu=True,
                parent_job_ids=parent_job_ids)
    def local_run(self, tasks):
        """Run each requested task synchronously in this process."""
        if COMPUTE_RASTER_STATS in tasks:
            run._compute_raster_stats(
                self.path_generator.compute_raster_stats_config_uri)
        if MAKE_TRAINING_CHIPS in tasks:
            run._make_training_chips(
                self.path_generator.make_training_chips_config_uri)
        if TRAIN in tasks:
            run._train(self.path_generator.train_config_uri)
        if PREDICT in tasks:
            run._predict(self.path_generator.predict_config_uri)
        if EVAL in tasks:
            run._eval(self.path_generator.eval_config_uri)
def _main(workflow_uri,
          tasks,
          remote=False,
          simulated_remote=False,
          branch='develop',
          run=False):
    """Generate the per-task config files for a workflow and, when
    `run` is set, execute the tasks remotely or locally."""
    if not tasks:
        tasks = ALL_TASKS
    unknown = [task for task in tasks if task not in ALL_TASKS]
    if unknown:
        raise Exception("Task '{}' is not a valid task.".format(unknown[0]))
    workflow = ChainWorkflow(workflow_uri, remote=(remote or simulated_remote))
    workflow.save_configs(tasks)
    if not run:
        return
    if remote:
        workflow.remote_run(tasks, branch)
    else:
        workflow.local_run(tasks)
@click.command()
@click.argument('workflow_uri')
@click.argument('tasks', nargs=-1)
@click.option('--remote', is_flag=True)
@click.option('--simulated-remote', is_flag=True)
@click.option('--branch', default='develop')
@click.option('--run', is_flag=True)
def main(workflow_uri, tasks, remote, simulated_remote, branch, run):
    """CLI entry point: forwards parsed click arguments to _main()."""
    _main(
        workflow_uri,
        tasks,
        remote=remote,
        simulated_remote=simulated_remote,
        branch=branch,
        run=run)
if __name__ == '__main__':
    main()
|
from cerberus import Validator
import boto3
def handler(event, context):
    """Lambda handler: validate `event` against a name schema and
    forward event['name'] to a local SQS queue.

    Raises Exception when validation fails; returns a static payload.
    """
    # Bug fix: the original used Python 2 print statements
    # (print 'test' / print event), which are syntax errors under
    # Python 3 -- converted to print() calls.
    print('test')
    print(event)
    schema = {'name': {'type': 'string'}}
    v = Validator()
    validation = v.validate(event, schema)
    if not validation:
        print('invalid')
        raise Exception('Validation error')
    # endpoint_url targets a localstack SQS instance.
    client = boto3.client('sqs', endpoint_url='http://localhost:4576')
    response = client.send_message(
        QueueUrl='http://localhost:4576/queue/test',
        MessageBody=event['name']
    )
    print(response)
    return {'foo': 'bar'}
|
import flask
import os
import glob
import re
import pymysql.cursors
from donut import auth_utils
from donut.modules.editor.edit_permission import EditPermission
# In seconds
TIMEOUT = 60 * 3
def change_lock_status(title, new_lock_status, default=False, forced=False):
    """
    This is called when a user starts or stops editing a
    page.

    title: page title (spaces are normalized to underscores).
    new_lock_status: desired locked flag.
    default: True when on the default landing page -- no lock to manage.
    forced: bypass the ownership check (used when a lock has expired).
    """
    title = title.replace(" ", "_")
    if default:
        return
    # If this function is called from
    # is_locked due to the page being expired...
    if forced:
        update_lock_query(title, new_lock_status)
        return
    # This is mainly because there were pages already created that weren't in
    # the database.
    uid = auth_utils.get_user_id(flask.session['username'])
    query = """SELECT last_edit_uid FROM webpage_files WHERE title = %s"""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, title)
        res = cursor.fetchone()
    # If the page isn't locked before OR if the last user who edited this
    # Is the same person
    if not is_locked(title) or res['last_edit_uid'] == uid:
        update_lock_query(title, new_lock_status)
def update_lock_query(title, new_lock_status):
    """Persist a page's lock flag, stamping the current editor and
    edit time."""
    normalized = title.replace(" ", "_")
    editor_uid = auth_utils.get_user_id(flask.session['username'])
    query = """
    UPDATE webpage_files
    SET locked = %s, last_edit_time = NOW(), last_edit_uid = %s
    WHERE title = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (new_lock_status, editor_uid, normalized))
def is_locked(title, default=False):
    """
    Gets the edit lock status of the current request page.

    Returns False when `default` is set (the default page is never
    locked), when the page has no database row, or when the previous
    lock has timed out; otherwise returns the stored locked flag.
    """
    if default:
        return False
    title = title.replace(" ", "_")
    query = """
    SELECT locked, TIMESTAMPDIFF(SECOND, last_edit_time, NOW()) as expired
    FROM webpage_files WHERE title = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, title)
        res = cursor.fetchone()
    # Fix: identity comparison with None instead of `== None` (PEP 8).
    if res is None:
        return False
    # Locking the file times out after 3 minutes (since we are
    # updating the last access time every 1 minute, and we generously
    # account for some lag).
    if res['expired'] >= TIMEOUT:
        change_lock_status(title, False, forced=True)
        return False
    return res['locked']
def create_page_in_database(title, content):
    """Insert a row for `title`, or refresh `content` if one exists.

    Some pages exist on disk without a database entry; the ON
    DUPLICATE KEY clause makes this call idempotent.
    """
    normalized = title.replace(" ", "_")
    query = """
    INSERT INTO webpage_files (title, content) VALUES (%s, %s) ON DUPLICATE KEY UPDATE locked = locked, content = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (normalized, content, content))
def rename_title(old_filename, new_filename):
    """Re-point the database row for `old_filename` at `new_filename`."""
    old_key = old_filename.replace(" ", "_")
    new_key = new_filename.replace(" ", "_")
    query = """
    UPDATE webpage_files SET title = %s WHERE title = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (new_key, old_key))
def read_markdown(title):
    """Return the stored markdown content for `title`, or None when no
    row exists."""
    title = title.replace(" ", "_")
    query = """SELECT content FROM webpage_files
    WHERE title = %s"""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [title])
        res = cursor.fetchone()
    # Fix: identity comparison with None instead of `!= None` (PEP 8).
    return res['content'] if res is not None else None
def read_file(path):
    """Return the contents of the file at `path`, or the empty string
    when `path` is not an existing file."""
    if not os.path.isfile(path):
        return ''
    with open(path) as handle:
        return handle.read()
def get_links():
    """Map every stored page title to its display URL."""
    query = """SELECT title FROM webpage_files"""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [])
        rows = cursor.fetchall()
    return {
        row['title']: flask.url_for('uploads.display', url=row['title'])
        for row in rows
    }
def clean_file_names(path, links):
    """Strip the directory prefix and .md suffix from glob results and
    convert underscores back to spaces."""
    prefix = path + '/'
    cleaned = []
    for link in links:
        name = link.replace(prefix, '').replace('.md', '')
        cleaned.append(name.replace('_', ' '))
    return cleaned
def remove_file_from_db(filename):
    """Delete the database row tracking `filename`."""
    key = filename.replace(' ', '_')
    query = """DELETE FROM webpage_files WHERE title = %s"""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, key)
def check_duplicate(filename):
    """
    Check to see if there are duplicate file names.

    Returns True when a page with this (normalized) title already
    exists in the database.
    """
    filename = filename.replace(' ', '_')
    query = """SELECT title FROM webpage_files WHERE title = %s"""
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [filename])
        res = cursor.fetchone()
    # Fix: return the boolean directly instead of
    # `False if res is None else True`.
    return res is not None
def check_title(title):
    """
    Makes sure the title is valid.

    Allows all numbers and letters plus ".", "/", "\\", "-", "_", ":"
    and spaces; the title must be under 100 characters.
    """
    # Fix: identity comparison with None instead of `!= None` (PEP 8).
    return len(title) < 100 and re.match(r'^[0-9a-zA-Z./\-_: ]*$',
                                         title) is not None
def check_edit_page_permission():
    """
    Checks if the user has permission to edit a page.
    """
    # Requires both an active login session and the ABLE edit
    # permission for the logged-in user.
    return auth_utils.check_login() and auth_utils.check_permission(
        flask.session['username'], EditPermission.ABLE)
|
#! /usr/bin/env python3
""" Full-Monty Python3 and the Holy Grail """
__copyright__ = "Copyright (C) 2009, Innovations Anonymous"
__version__ = "4.0"
__license__ = "Public Domain"
__status__ = "Development"
__author__ = "Brahmjot Singh"
__maintainer__ = "Brahmjot Singh"
__email__ = "InnovAnon-Inc@protonmail.com"
__contact__ = "(801) 448-7855"
__credits__ = [
"https://stackoverflow.com/questions/70936788/out-of-core-external-memory-combinatorics-in-python",
]
import ast
#from dask import delayed
from itertools import product as ip
#from joblib import delayed, Parallel
from collections.abc import Iterable
#from pprint import pprint
from random import randrange, choice, random, getrandbits
from string import ascii_letters, digits, punctuation
from types import GeneratorType
#import numpy as np
#from tatsu.ast import AST
#from tatsu.objectmodel import Node
#from tatsu.semantics import ModelBuilderSemantics
#import tatsu
#from tatsu.walkers import NodeWalker
from cg_abs import CGAbs
#from cg_type import CGType
#from cg import CG
#np.random.seed(1)
from functools import wraps
from dask import bag, delayed
# TODO
#def product(*args, repeat=1):
# # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
# pools = [tuple(pool) for pool in args] * repeat
# result = [[]]
# for pool in pools:
# result = [x+[y] for x in result for y in pool]
# for prod in result:
# yield tuple(prod)
#def product(*args, repeat=1):
# r = ip(*args, repeat=repeat)
# return bag.from_sequence(r)
def product(*funcs, repeat=None):
    """Lazy cartesian product over iterator *factories*.

    Each positional argument is a zero-argument callable returning a
    fresh iterable; calling the factory again for every inner pass lets
    the product be regenerated instead of materialized in memory
    (out-of-core combinatorics). `repeat` multiplies the factory tuple,
    mirroring itertools.product's keyword.
    """
    if not funcs:
        yield ()
        return
    if repeat is not None:
        funcs *= repeat
    first, *remaining = funcs
    for head in first():
        for tail in product(*remaining):
            yield (head, ) + tail
from functools import partial
#values = product(partial(gen1, arg1, arg2), partial(gen2, arg1))
#root = dbopen('test.fs')
def out_of_core(func):
    """Decorator hook for out-of-core evaluation backends.

    Currently a transparent pass-through (earlier experiments with
    dask bags / ZBigArray are left disabled); it preserves func's
    metadata so call sites stay stable.
    """
    @wraps(func)
    def passthrough(*args, **kwargs):
        return func(*args, **kwargs)
    return passthrough
def trace(func):
    """Decorator hook for enter/leave call logging (currently silent);
    preserves func's metadata via functools.wraps."""
    @wraps(func)
    def logged(*args, **kwargs):
        result = func(*args, **kwargs)
        return result
    return logged
class CG(object):
def __init__(self, max_rd=3):
self.max_rd = max_rd
@trace
def build_module_ast(self):
#pprint("build_module()")
#A = delayed(self.make_Module)()
A = self.make_Module()
#pprint("build_module A: %s" % (A,))
for a in A:#.compute():
assert not isinstance(a, GeneratorType)
#pprint("build_module a: %s" % (a,))
a = ast.fix_missing_locations(a)
#pprint("build_module a: %s" % (a,))
yield a
@trace
def compile_module(self):
A = self.build_module_ast()
for a in A:
assert a is not None
try:
b = compile(a, filename="", mode='exec', optimize=2)
#pprint("compile_module b: %s" % (b,))
yield a, b
#except(SyntaxError, ValueError): pass
#except TypeError as e: #pprint("TypeError: %s %s %s" % (e, a, b,))
except SyntaxError: pass
except ValueError as e:
#pprint("ValueError: %s %s %s" % (e, a, b,))
yield a, None
@trace
def exec_module(self):
A = self.compile_module()
for a, b in A:
assert a is not None
if b is None: continue # yield a, b, None
try:
c = exec(b)
#pprint("exec_module b: %s" % (b,))
yield a, b, c
except Exception as e:
#pprint("Error: %s %s %s %s" % (e, a, b, c,))
yield a, b, None
@trace
def build_expression_ast(self):
#pprint("build_expression()")
A = self.make_Expression()
for a in A:
assert not isinstance(a, GeneratorType)
#pprint("build_expression a: %s" % (a,))
a = ast.fix_missing_locations(a)
#pprint("build_expression a: %s" % (a,))
yield a
@trace
def compile_expression(self):
A = self.build_expression_ast()
for a in A:
try:
a = compile(a, filename="", mode='eval', optimize=2)
#pprint("compile_expression a: %s" % (a,))
yield a
except SyntaxError: pass
@trace
def exec_expression(self):
A = self.compile_expression()
for a in A:
try:
b = eval(a)
#pprint("exec_expression b: %s" % (a,))
yield a, b
except: pass
@out_of_core
@trace
def choice(self, C):
#pprint("choice(C=%s)" % (C,))
# TODO
for c in C:
#pprint("choice c: %s" % (c,))
yield c
    #@delayed
    @out_of_core
    @trace
    def make_star(self, f, d):
        # Expands a "zero or more" grammar slot: yields the empty list
        # first, then cartesian products of repeated f-values at depth d.
        # NOTE(review): whether `yield from product(*S)` sits inside the
        # for-loop (products of every prefix length 1..N) or after it is
        # load-bearing -- confirm intended indentation.
        #pprint("make_star(f=%s)" % (f,), indent=d)
        #if d == self.max_rd: raise StopIteration()
        #d += 1
        N = 10 # TODO: magic repetition cap
        S = []
        #yield S
        #yield None
        yield []
        for n in range(N):
            #S.append(f(d+1)) # f() -> GeneratorType
            #S.append(f(d)) # f() -> GeneratorType
            S.append(partial(f, d)) # f() -> GeneratorType
            #yield S
            # TODO
            yield from product(*S)
            #yield from delayed(product)(*S)
            #for k in product(*S):
            #    assert not isinstance(k, GeneratorType)
            #    yield k
@out_of_core
@trace
def make_optional(self, f, d):
#pprint("make_optional(f=%s)" % (f,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
yield None
#yield []
#yield [f(d+1)] # TODO from?
#yield [f(d)] # TODO from?
yield from f(d)
@out_of_core
@trace
def make_mod(self, d=0):
#pprint("make_mod(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_Module,
self.make_Interactive,
self.make_Expression,
self.make_FunctionType,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_mod c: %s" % (c,), indent=d)
yield from c(d)
#@delayed
@out_of_core
@trace
def make_Module(self, d=0):
#pprint("make_Module(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#body = self.make_star(self.make_stmt, d)
#type_ignores = self.make_star(self.make_type_ignore, d)
body = partial(self.make_star, self.make_stmt, d)
type_ignores = partial(self.make_star, self.make_type_ignore, d)
for b, ti in product(body, type_ignores):
assert not isinstance(b, GeneratorType)
assert not isinstance(ti, GeneratorType)
assert isinstance(b, Iterable)
#pprint("make_Module b: %s, ti: %s" % (b, ti,), indent=d)
assert len(b) == 0 or not isinstance(b[0], GeneratorType)
yield ast.Module(body=list(b), type_ignores=list(ti))
@out_of_core
@trace
def make_Interactive(self, d=0):
#pprint("make_Interactive(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
body = self.make_star(self.make_stmt, d)
for b in body:
assert not isinstance(b, GeneratorType)
#pprint("make_Interactive b: %s" % (b,), indent=d)
yield ast.Interactive(body=list(b))
    @out_of_core
    @trace
    def make_Expression(self, d=0):
        # Yields ast.Expression candidates; recursion bounded by max_rd.
        # NOTE(review): ast.Expression.body is a single expr node, but
        # this builds it from a *list of statements*; compile(mode='eval')
        # will reject these -- presumably failures are filtered downstream
        # (compile_expression catches SyntaxError). Confirm intent.
        if d == self.max_rd: return # raise StopIteration()
        d += 1
        body = self.make_star(self.make_stmt, d)
        for b in body:
            assert not isinstance(b, GeneratorType)
            yield ast.Expression(body=list(b))
@out_of_core
@trace
def make_FunctionType(self, d=0):
#pprint("make_FunctionType(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#argtypes = self.make_star(self.make_expr, d)
#returns = self.make_expr(d) # TODO
argtypes = partial(self.make_star, self.make_expr, d)
returns = partial(self.make_expr, d) # TODO
for a, r in product(argtypes, returns):
assert not isinstance(a, GeneratorType)
assert not isinstance(r, GeneratorType)
#pprint("make_FunctionType a: %s, r: %s" % (a, r,), indent=d)
yield ast.FunctionType(argtypes=a, returns=r)
@out_of_core
@trace
def make_stmt(self, d):
#pprint("make_stmt(d=%s)" % (d,), indent=d)
#if d == self.max_rd: raise StopIteration()
#d += 1
choices = [
self.make_FunctionDef,
self.make_AsyncFunctionDef,
self.make_ClassDef,
self.make_Return,
self.make_Delete,
self.make_Assign,
self.make_AugAssign,
self.make_AnnAssign,
self.make_For,
self.make_AsyncFor,
self.make_While,
self.make_If,
self.make_With,
self.make_AsyncWith,
self.make_Match,
self.make_Raise,
self.make_Try,
self.make_Assert,
self.make_Import,
self.make_ImportFrom,
self.make_Global,
self.make_Nonlocal,
self.make_Expr,
self.make_Pass,
self.make_Break,
self.make_Continue,
]
for c in self.choice(choices):
assert not isinstance(c, GeneratorType)
#pprint("make_stmt c: %s" % (c,), indent=d)
#for k in c(d):
# assert not isinstance(k, GeneratorType)
# #pprint("make_stmt k: %s" % (k,), indent=d)
# yield k
yield from c(d)
@out_of_core
@trace
def make_FunctionDef(self, d):
#pprint("make_FunctionDef(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#name = self.make_identifier(d)
#args = self.make_arguments(d)
#body = self.make_star(self.make_stmt, d)
#decorator_list = self.make_star(self.make_expr, d)
#returns = self.make_optional(self.make_expr, d)
#type_comment = self.make_optional(self.make_string, d)
name = partial(self.make_identifier, d)
args = partial(self.make_arguments, d)
body = partial(self.make_star, self.make_stmt, d)
decorator_list = partial(self.make_star, self.make_expr, d)
returns = partial(self.make_optional, self.make_expr, d)
type_comment = partial(self.make_optional, self.make_string, d)
for n, a, b, dl, r, tc in product(name, args, body, decorator_list, returns, type_comment):
assert not isinstance(n, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(dl, GeneratorType)
assert not isinstance(r, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_FunctionDef n: %s, a: %s, b: %s, dl: %s, r: %s, tc: %s" % (n, a, b, dl, r, tc,), indent=d)
yield ast.FunctionDef(name=n, args=a, body=list(b), decorator_list=dl, returns=r, type_comment=tc)
@out_of_core
@trace
def make_AsyncFunctionDef(self, d):
#pprint("make_AsyncFunctionDef(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#name = self.make_identifier(d)
#args = self.make_arguments(d)
#body = self.make_star(self.make_stmt, d)
#decorator_list = self.make_star(self.make_expr, d)
#returns = self.make_optional(self.make_expr, d)
#type_comment = self.make_optional(self.make_string, d)
name = partial(self.make_identifier, d)
args = partial(self.make_arguments, d)
body = partial(self.make_star, self.make_stmt, d)
decorator_list = partial(self.make_star, self.make_expr, d)
returns = partial(self.make_optional, self.make_expr, d)
type_comment = partial(self.make_optional, self.make_string, d)
for n, a, b, dl, r, tc in product(name, args, body, decorator_list, returns, type_comment):
assert not isinstance(n, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(dl, GeneratorType)
assert not isinstance(r, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_AsyncFunctionDef n: %s, a: %s, b: %s, dl: %s, r: %s, tc: %s" % (n, a, b, dl, r, tc,), indent=d)
yield ast.AsyncFunctionDef(name=n, args=a, body=list(b), decorator_list=dl, returns=r, type_comment=tc)
@out_of_core
@trace
def make_ClassDef(self, d):
#pprint("make_ClassDef(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#name = self.make_identifier(d)
#bases = self.make_star(self.make_expr, d)
#keywords = self.make_star(self.make_keyword, d)
#body = self.make_star(self.make_stmt, d)
#decorator_list = self.make_star(self.make_expr, d)
name = partial(self.make_identifier, d)
bases = partial(self.make_star, self.make_expr, d)
keywords = partial(self.make_star, self.make_keyword, d)
body = partial(self.make_star, self.make_stmt, d)
decorator_list = partial(self.make_star, self.make_expr, d)
for n, ba, k, bo, dl in product(name, bases, keywords, body, decorator_list):
assert not isinstance(n, GeneratorType)
assert not isinstance(ba, GeneratorType)
assert not isinstance(k, GeneratorType)
assert not isinstance(bo, GeneratorType)
assert not isinstance(dl, GeneratorType)
#pprint("make_ClassDef n: %s, ba: %s, k: %s, bo: %s, dl: %s" % (n, ba, k, bo, dl,), indent=d)
yield ast.ClassDef(name=n, bases=ba, keywords=k, body=list(bo), decorator_list=dl)
@out_of_core
@trace
def make_Return(self, d):
#pprint("make_Return(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
value = self.make_optional(self.make_expr, d)
#value = partial(self.make_optional, self.make_expr, d)
for v in value:
assert not isinstance(v, GeneratorType)
#pprint("make_Return v: %s" % (v,), indent=d)
yield ast.Return(value=v)
@out_of_core
@trace
def make_Delete(self, d):
#pprint("make_Delete(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
targets = self.make_star(self.make_expr, d)
#targets = partial(self.make_star, self.make_expr, d)
for t in targets:
assert not isinstance(t, GeneratorType)
#pprint("make_Delete t: %s" % (t,), indent=d)
yield ast.Delete(targets=t)
@out_of_core
@trace
def make_Assign(self, d):
#pprint("make_Assign(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
targets = partial(self.make_star, self.make_expr, d)
value = partial(self.make_expr, d)
type_comment = partial(self.make_optional, self.make_string, d)
for t, v, tc in product(targets, value, type_comment):
assert not isinstance(t, GeneratorType)
assert not isinstance(v, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_Assign t: %s, v: %s, tc: %s" % (t, v, tc,), indent=d)
yield ast.Assign(targets=t, value=v, type_comment=tc)
@out_of_core
@trace
def make_AugAssign(self, d):
#pprint("make_AugAssign(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#op = self.make_operator(d)
#value = self.make_expr(d)
target = partial(self.make_expr, d)
op = partial(self.make_operator, d)
value = partial(self.make_expr, d)
for t, o, v in product(target, op, value):
assert not isinstance(t, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(v, GeneratorType)
#pprint("make_AugAssign t: %s, o: %s, v: %s" % (t, o, v,), indent=d)
yield ast.AugAssign(target=t, op=o, value=v)
@out_of_core
@trace
def make_AnnAssign(self, d):
#pprint("make_AnnAssign(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#annotation = self.make_expr(d)
#value = self.make_optional(self.make_expr, d)
#simple = self.make_int(d)
target = partial(self.make_expr, d)
annotation = partial(self.make_expr, d)
value = partial(self.make_optional, self.make_expr, d)
simple = partial(self.make_int, d)
for t, a, v, s in product(target, annotation, value, simple):
assert not isinstance(t, GeneratorType)
assert not isinstance(a, GeneratorType)
assert not isinstance(v, GeneratorType)
assert not isinstance(s, GeneratorType)
#pprint("make_AnnAssign t: %s, a: %s, v: %s, s: %s" % (t, a, v, s,), indent=d)
yield ast.AnnAssign(target=t, annotation=a, value=v, simple=s)
@out_of_core
@trace
def make_For(self, d):
#pprint("make_For(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#iter_ = self.make_expr(d)
#body = self.make_star(self.make_stmt, d)
#orelse = self.make_star(self.make_stmt, d)
#type_comment = self.make_optional(self.make_string, d)
target = partial(self.make_expr, d)
iter_ = partial(self.make_expr, d)
body = partial(self.make_star, self.make_stmt, d)
orelse = partial(self.make_star, self.make_stmt, d)
type_comment = partial(self.make_optional, self.make_string, d)
for t, i, b, o, tc in product(target, iter_, body, orelse, type_comment):
assert not isinstance(t, GeneratorType)
assert not isinstance(i, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_For t: %s, i: %s, b: %s, o: %s, tc: %s" % (t, i, b, o, tc,), indent=d)
yield ast.For(target=t, iter=i, body=list(b), orelse=list(o), type_comment=tc)
@out_of_core
@trace
def make_AsyncFor(self, d):
#pprint("make_AsyncFor(d=%s)" % (d,), indent=d)
if d == self.max_rd: return # raise StopIteration()
d += 1
#target = self.make_expr(d)
#iter_ = self.make_expr(d)
#body = self.make_star(self.make_stmt, d)
#orelse = self.make_star(self.make_stmt, d)
#type_comment = self.make_optional(self.make_string, d)
target = partial(self.make_expr, d)
iter_ = partial(self.make_expr, d)
body = partial(self.make_star, self.make_stmt, d)
orelse = partial(self.make_star, self.make_stmt, d)
type_comment = partial(self.make_optional, self.make_string, d)
for t, i, b, o, tc in product(target, iter_, body, orelse, type_comment):
assert not isinstance(t, GeneratorType)
assert not isinstance(i, GeneratorType)
assert not isinstance(b, GeneratorType)
assert not isinstance(o, GeneratorType)
assert not isinstance(tc, GeneratorType)
#pprint("make_AsyncFor t: %s, i: %s, b: %s, o: %s, tc: %s" % (t, i, b, o, tc,), indent=d)
yield ast.AsyncFor(target=t, iter=i, body=list(b), orelse=list(o), type_comment=tc)
@out_of_core
@trace
def make_While(self, d):
    """Yield ast.While nodes for every combination of test, body and
    orelse producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    test = partial(self.make_expr, d)
    body = partial(self.make_star, self.make_stmt, d)
    orelse = partial(self.make_star, self.make_stmt, d)
    for t, b, o in product(test, body, orelse):
        assert not isinstance(t, GeneratorType)
        assert not isinstance(b, GeneratorType)
        assert not isinstance(o, GeneratorType)
        yield ast.While(test=t, body=list(b), orelse=list(o))
@out_of_core
@trace
def make_If(self, d):
    """Yield ast.If nodes for every combination of test, body and orelse
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    test = partial(self.make_expr, d)
    body = partial(self.make_star, self.make_stmt, d)
    orelse = partial(self.make_star, self.make_stmt, d)
    for t, b, o in product(test, body, orelse):
        assert not isinstance(t, GeneratorType)
        assert not isinstance(b, GeneratorType)
        assert not isinstance(o, GeneratorType)
        yield ast.If(test=t, body=list(b), orelse=list(o))
@out_of_core
@trace
def make_With(self, d):
    """Yield ast.With nodes for every combination of items, body and
    type_comment producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    items = partial(self.make_star, self.make_withitem, d)
    body = partial(self.make_star, self.make_stmt, d)
    type_comment = partial(self.make_optional, self.make_string, d)
    for i, b, tc in product(items, body, type_comment):
        assert not isinstance(i, GeneratorType)
        assert not isinstance(b, GeneratorType)
        assert not isinstance(tc, GeneratorType)
        # NOTE(review): items is not wrapped in list() while body is --
        # presumably make_star already yields a sequence; confirm.
        yield ast.With(items=i, body=list(b), type_comment=tc)
@out_of_core
@trace
def make_AsyncWith(self, d):
    """Yield ast.AsyncWith nodes for every combination of items, body and
    type_comment producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    items = partial(self.make_star, self.make_withitem, d)
    body = partial(self.make_star, self.make_stmt, d)
    type_comment = partial(self.make_optional, self.make_string, d)
    for i, b, tc in product(items, body, type_comment):
        assert not isinstance(i, GeneratorType)
        assert not isinstance(b, GeneratorType)
        assert not isinstance(tc, GeneratorType)
        yield ast.AsyncWith(items=i, body=list(b), type_comment=tc)
@out_of_core
@trace
def make_Match(self, d):
    """Yield ast.Match nodes (Python 3.10+) for every combination of
    subject and cases producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    subject = partial(self.make_expr, d)
    cases = partial(self.make_star, self.make_match_case, d)
    for s, c in product(subject, cases):
        assert not isinstance(s, GeneratorType)
        assert not isinstance(c, GeneratorType)
        # NOTE(review): cases is not list()-wrapped like stmt bodies
        # elsewhere -- presumably already a sequence; confirm.
        yield ast.Match(subject=s, cases=c)
@out_of_core
@trace
def make_Raise(self, d):
    """Yield ast.Raise nodes for every combination of exc and cause
    (both optional) producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    exc = partial(self.make_optional, self.make_expr, d)
    cause = partial(self.make_optional, self.make_expr, d)
    for e, c in product(exc, cause):
        assert not isinstance(e, GeneratorType)
        assert not isinstance(c, GeneratorType)
        yield ast.Raise(exc=e, cause=c)
@out_of_core
@trace
def make_Try(self, d):
    """Yield ast.Try nodes for every combination of body, handlers, orelse
    and finalbody producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    body = partial(self.make_star, self.make_stmt, d)
    handlers = partial(self.make_star, self.make_excepthandler, d)
    orelse = partial(self.make_star, self.make_stmt, d)
    finalbody = partial(self.make_star, self.make_stmt, d)
    for b, h, o, f in product(body, handlers, orelse, finalbody):
        assert not isinstance(b, GeneratorType)
        assert not isinstance(h, GeneratorType)
        assert not isinstance(o, GeneratorType)
        assert not isinstance(f, GeneratorType)
        # NOTE(review): handlers is not list()-wrapped while the stmt
        # bodies are -- presumably already a sequence; confirm.
        yield ast.Try(body=list(b), handlers=h, orelse=list(o), finalbody=list(f))
@out_of_core
@trace
def make_Assert(self, d):
    """Yield ast.Assert nodes for every combination of test and (optional)
    msg producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    test = partial(self.make_expr, d)
    msg = partial(self.make_optional, self.make_expr, d)
    for t, m in product(test, msg):
        assert not isinstance(t, GeneratorType)
        assert not isinstance(m, GeneratorType)
        yield ast.Assert(test=t, msg=m)
@out_of_core
@trace
def make_Import(self, d):
    """Yield ast.Import nodes for every alias list producible within the
    depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    names = self.make_star(self.make_alias, d)
    for n in names:
        assert not isinstance(n, GeneratorType)
        yield ast.Import(names=list(n))
@out_of_core
@trace
def make_ImportFrom(self, d):
    """Yield ast.ImportFrom nodes for every combination of (optional)
    module, names and (optional) level producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    module = partial(self.make_optional, self.make_identifier, d)
    names = partial(self.make_star, self.make_alias, d)
    level = partial(self.make_optional, self.make_int, d)
    for m, n, l in product(module, names, level):
        assert not isinstance(m, GeneratorType)
        assert not isinstance(n, GeneratorType)
        assert not isinstance(l, GeneratorType)
        yield ast.ImportFrom(module=m, names=list(n), level=l)
@out_of_core
@trace
def make_Global(self, d):
    """Yield ast.Global nodes for every identifier list producible within
    the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    names = self.make_star(self.make_identifier, d)
    for n in names:
        assert not isinstance(n, GeneratorType)
        yield ast.Global(names=list(n))
@out_of_core
@trace
def make_Nonlocal(self, d):
    """Yield ast.Nonlocal nodes for every identifier list producible
    within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    names = self.make_star(self.make_identifier, d)
    for n in names:
        assert not isinstance(n, GeneratorType)
        yield ast.Nonlocal(names=list(n))
@out_of_core
@trace
def make_Expr(self, d):
    """Yield ast.Expr (expression-statement) nodes wrapping every
    expression producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    value = self.make_expr(d)
    for v in value:
        assert not isinstance(v, GeneratorType)
        yield ast.Expr(value=v)
@out_of_core
@trace
def make_Pass(self, d):
    """Yield the single ast.Pass node (leaf: no recursion, so no depth guard)."""
    yield ast.Pass()
@out_of_core
@trace
def make_Break(self, d):
    """Yield the single ast.Break node (leaf: no recursion, so no depth guard)."""
    yield ast.Break()
@out_of_core
@trace
def make_Continue(self, d):
    """Yield the single ast.Continue node (leaf: no recursion, so no depth guard)."""
    yield ast.Continue()
@out_of_core
@trace
def make_expr(self, d):
    """Yield expression nodes by dispatching to every concrete expr
    factory (the alternatives of the ASDL `expr` sum type).

    No depth guard here: each concrete factory enforces the budget itself.
    `self.choice` presumably controls ordering/selection of the
    alternatives -- confirm.
    """
    choices = [
        self.make_BoolOp,
        self.make_NamedExpr,
        self.make_BinOp,
        self.make_UnaryOp,
        self.make_Lambda,
        self.make_IfExp,
        self.make_Dict,
        self.make_Set,
        self.make_ListComp,
        self.make_SetComp,
        self.make_DictComp,
        self.make_GeneratorExp,
        self.make_Await,
        self.make_Yield,
        self.make_YieldFrom,
        self.make_Compare,
        self.make_Call,
        self.make_FormattedValue,
        self.make_JoinedStr,
        self.make_Constant,
        self.make_Attribute,
        self.make_Subscript,
        self.make_Starred,
        self.make_Name,
        self.make_List,
        self.make_Tuple,
        self.make_Slice,
    ]
    for c in self.choice(choices):
        assert not isinstance(c, GeneratorType)
        yield from c(d)
@out_of_core
@trace
def make_BoolOp(self, d):
    """Yield ast.BoolOp nodes for every combination of op and values
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    op = partial(self.make_boolop, d)
    values = partial(self.make_star, self.make_expr, d)
    for o, v in product(op, values):
        assert not isinstance(o, GeneratorType)
        assert not isinstance(v, GeneratorType)
        yield ast.BoolOp(op=o, values=v)
@out_of_core
@trace
def make_NamedExpr(self, d):
    """Yield ast.NamedExpr (walrus) nodes for every combination of target
    and value producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    target = partial(self.make_expr, d)
    value = partial(self.make_expr, d)
    for t, v in product(target, value):
        assert not isinstance(t, GeneratorType)
        assert not isinstance(v, GeneratorType)
        yield ast.NamedExpr(target=t, value=v)
@out_of_core
@trace
def make_BinOp(self, d):
    """Yield ast.BinOp nodes for every combination of left, op and right
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    left = partial(self.make_expr, d)
    op = partial(self.make_operator, d)
    right = partial(self.make_expr, d)
    for l, o, r in product(left, op, right):
        assert not isinstance(l, GeneratorType)
        assert not isinstance(o, GeneratorType)
        assert not isinstance(r, GeneratorType)
        yield ast.BinOp(left=l, op=o, right=r)
@out_of_core
@trace
def make_UnaryOp(self, d):
    """Yield ast.UnaryOp nodes for every combination of op and operand
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    op = partial(self.make_unaryop, d)
    operand = partial(self.make_expr, d)
    for o, a in product(op, operand):
        assert not isinstance(o, GeneratorType)
        assert not isinstance(a, GeneratorType)
        yield ast.UnaryOp(op=o, operand=a)
@out_of_core
@trace
def make_Lambda(self, d):
    """Yield ast.Lambda nodes for every combination of args and body
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    args = partial(self.make_arguments, d)
    body = partial(self.make_expr, d)
    for a, b in product(args, body):
        assert not isinstance(a, GeneratorType)
        assert not isinstance(b, GeneratorType)
        # BUG FIX: Lambda.body is a single expression in the ASDL grammar,
        # not a stmt* list. The previous `body=list(b)` called list() on an
        # AST node, which raised TypeError (AST nodes are not iterable), so
        # no Lambda was ever produced.
        yield ast.Lambda(args=a, body=b)
@out_of_core
@trace
def make_IfExp(self, d):
    """Yield ast.IfExp (conditional-expression) nodes for every combination
    of test, body and orelse producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    test = partial(self.make_expr, d)
    body = partial(self.make_expr, d)
    orelse = partial(self.make_expr, d)
    for t, b, o in product(test, body, orelse):
        assert not isinstance(t, GeneratorType)
        assert not isinstance(b, GeneratorType)
        assert not isinstance(o, GeneratorType)
        # BUG FIX: IfExp.body and IfExp.orelse are single expressions in the
        # ASDL grammar, not stmt* lists. The previous `list(b)` / `list(o)`
        # called list() on AST nodes, which raised TypeError (AST nodes are
        # not iterable), so no IfExp was ever produced.
        yield ast.IfExp(test=t, body=b, orelse=o)
@out_of_core
@trace
def make_Dict(self, d):
    """Yield ast.Dict nodes for every combination of keys and values
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    # NOTE(review): keys and values are enumerated independently, so their
    # lengths may differ -- confirm that downstream consumers tolerate this.
    keys = partial(self.make_star, self.make_expr, d)
    values = partial(self.make_star, self.make_expr, d)
    for k, v in product(keys, values):
        assert not isinstance(k, GeneratorType)
        assert not isinstance(v, GeneratorType)
        yield ast.Dict(keys=k, values=v)
@out_of_core
@trace
def make_Set(self, d):
    """Yield ast.Set nodes for every element list producible within the
    depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    elts = self.make_star(self.make_expr, d)
    for e in elts:
        assert not isinstance(e, GeneratorType)
        yield ast.Set(elts=e)
@out_of_core
@trace
def make_ListComp(self, d):
    """Yield ast.ListComp nodes for every combination of elt and
    generators producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    elt = partial(self.make_expr, d)
    generators = partial(self.make_star, self.make_comprehension, d)
    for e, g in product(elt, generators):
        assert not isinstance(e, GeneratorType)
        assert not isinstance(g, GeneratorType)
        yield ast.ListComp(elt=e, generators=g)
@out_of_core
@trace
def make_SetComp(self, d):
    """Yield ast.SetComp nodes for every combination of elt and generators
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    elt = partial(self.make_expr, d)
    generators = partial(self.make_star, self.make_comprehension, d)
    for e, g in product(elt, generators):
        assert not isinstance(e, GeneratorType)
        assert not isinstance(g, GeneratorType)
        yield ast.SetComp(elt=e, generators=g)
@out_of_core
@trace
def make_DictComp(self, d):
    """Yield ast.DictComp nodes for every combination of key, value and
    generators producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    key = partial(self.make_expr, d)
    value = partial(self.make_expr, d)
    generators = partial(self.make_star, self.make_comprehension, d)
    for k, v, g in product(key, value, generators):
        assert not isinstance(k, GeneratorType)
        assert not isinstance(v, GeneratorType)
        assert not isinstance(g, GeneratorType)
        yield ast.DictComp(key=k, value=v, generators=g)
@out_of_core
@trace
def make_GeneratorExp(self, d):
    """Yield ast.GeneratorExp nodes for every combination of elt and
    generators producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    elt = partial(self.make_expr, d)
    generators = partial(self.make_star, self.make_comprehension, d)
    for e, g in product(elt, generators):
        assert not isinstance(e, GeneratorType)
        assert not isinstance(g, GeneratorType)
        yield ast.GeneratorExp(elt=e, generators=g)
@out_of_core
@trace
def make_Await(self, d):
    """Yield ast.Await nodes wrapping every expression producible within
    the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    value = self.make_expr(d)
    for v in value:
        assert not isinstance(v, GeneratorType)
        yield ast.Await(value=v)
@out_of_core
@trace
def make_Yield(self, d):
    """Yield ast.Yield nodes wrapping every (optional) expression
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    value = self.make_optional(self.make_expr, d)
    for v in value:
        assert not isinstance(v, GeneratorType)
        yield ast.Yield(value=v)
@out_of_core
@trace
def make_YieldFrom(self, d):
    """Yield ast.YieldFrom nodes wrapping every expression producible
    within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    value = self.make_expr(d)
    for v in value:
        assert not isinstance(v, GeneratorType)
        yield ast.YieldFrom(value=v)
@out_of_core
@trace
def make_Compare(self, d):
    """Yield ast.Compare nodes for every combination of left, ops and
    comparators producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    # NOTE(review): ops and comparators are enumerated independently, so
    # their lengths may differ -- confirm consumers tolerate this.
    left = partial(self.make_expr, d)
    ops = partial(self.make_star, self.make_cmpop, d)
    comparators = partial(self.make_star, self.make_expr, d)
    for l, o, c in product(left, ops, comparators):
        assert not isinstance(l, GeneratorType)
        assert not isinstance(o, GeneratorType)
        assert not isinstance(c, GeneratorType)
        yield ast.Compare(left=l, ops=o, comparators=c)
@out_of_core
@trace
def make_Call(self, d):
    """Yield ast.Call nodes for every combination of func, args and
    keywords producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    func = partial(self.make_expr, d)
    args = partial(self.make_star, self.make_expr, d)
    keywords = partial(self.make_star, self.make_keyword, d)
    for f, a, k in product(func, args, keywords):
        assert not isinstance(f, GeneratorType)
        assert not isinstance(a, GeneratorType)
        assert not isinstance(k, GeneratorType)
        yield ast.Call(func=f, args=a, keywords=k)
@out_of_core
@trace
def make_FormattedValue(self, d):
    """Yield ast.FormattedValue (f-string field) nodes for every
    combination of value, conversion and format_spec producible within
    the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    # NOTE(review): conversion should be one of -1, 115 ('s'), 114 ('r'),
    # 97 ('a') -- presumably make_int covers these; confirm.
    value = partial(self.make_expr, d)
    conversion = partial(self.make_int, d)
    format_spec = partial(self.make_optional, self.make_expr, d)
    for v, c, f in product(value, conversion, format_spec):
        assert not isinstance(v, GeneratorType)
        assert not isinstance(c, GeneratorType)
        assert not isinstance(f, GeneratorType)
        yield ast.FormattedValue(value=v, conversion=c, format_spec=f)
@out_of_core
@trace
def make_JoinedStr(self, d):
    """Yield ast.JoinedStr (f-string) nodes for every values list
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # Single axis: the generator is consumed directly, no product needed.
    values = self.make_star(self.make_expr, d)
    for v in values:
        assert not isinstance(v, GeneratorType)
        yield ast.JoinedStr(values=v)
@out_of_core
@trace
def make_Constant(self, d):
    """Yield ast.Constant nodes for every combination of value and
    (optional) kind producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    value = partial(self.make_constant, d)
    kind = partial(self.make_optional, self.make_string, d)
    for v, k in product(value, kind):
        assert not isinstance(v, GeneratorType)
        assert not isinstance(k, GeneratorType)
        yield ast.Constant(value=v, kind=k)
@out_of_core
@trace
def make_Attribute(self, d):
    """Yield ast.Attribute nodes for every combination of value, attr and
    ctx producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    value = partial(self.make_expr, d)
    attr = partial(self.make_identifier, d)
    ctx = partial(self.make_expr_context, d)
    for v, a, c in product(value, attr, ctx):
        assert not isinstance(v, GeneratorType)
        assert not isinstance(a, GeneratorType)
        assert not isinstance(c, GeneratorType)
        yield ast.Attribute(value=v, attr=a, ctx=c)
@out_of_core
@trace
def make_Subscript(self, d):
    """Yield ast.Subscript nodes for every combination of value, slice and
    ctx producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    value = partial(self.make_expr, d)
    slice_ = partial(self.make_expr, d)
    ctx = partial(self.make_expr_context, d)
    for v, s, c in product(value, slice_, ctx):
        assert not isinstance(v, GeneratorType)
        assert not isinstance(s, GeneratorType)
        assert not isinstance(c, GeneratorType)
        yield ast.Subscript(value=v, slice=s, ctx=c)
@out_of_core
@trace
def make_Starred(self, d):
    """Yield ast.Starred nodes for every combination of value and ctx
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    value = partial(self.make_expr, d)
    ctx = partial(self.make_expr_context, d)
    for v, c in product(value, ctx):
        assert not isinstance(v, GeneratorType)
        assert not isinstance(c, GeneratorType)
        yield ast.Starred(value=v, ctx=c)
@out_of_core
@trace
def make_Name(self, d):
    """Yield ast.Name nodes for every combination of id and ctx producible
    within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    id_ = partial(self.make_identifier, d)
    ctx = partial(self.make_expr_context, d)
    for i, c in product(id_, ctx):
        assert not isinstance(i, GeneratorType)
        assert not isinstance(c, GeneratorType)
        yield ast.Name(id=i, ctx=c)
@out_of_core
@trace
def make_List(self, d):
    """Yield ast.List nodes for every combination of elts and ctx
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    elts = partial(self.make_star, self.make_expr, d)
    ctx = partial(self.make_expr_context, d)
    for e, c in product(elts, ctx):
        assert not isinstance(e, GeneratorType)
        assert not isinstance(c, GeneratorType)
        yield ast.List(elts=e, ctx=c)
@out_of_core
@trace
def make_Tuple(self, d):
    """Yield ast.Tuple nodes for every combination of elts and ctx
    producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    elts = partial(self.make_star, self.make_expr, d)
    ctx = partial(self.make_expr_context, d)
    for e, c in product(elts, ctx):
        assert not isinstance(e, GeneratorType)
        assert not isinstance(c, GeneratorType)
        yield ast.Tuple(elts=e, ctx=c)
@out_of_core
@trace
def make_Slice(self, d):
    """Yield ast.Slice nodes for every combination of (optional) lower,
    upper and step producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    lower = partial(self.make_optional, self.make_expr, d)
    upper = partial(self.make_optional, self.make_expr, d)
    step = partial(self.make_optional, self.make_expr, d)
    for l, u, s in product(lower, upper, step):
        assert not isinstance(l, GeneratorType)
        assert not isinstance(u, GeneratorType)
        assert not isinstance(s, GeneratorType)
        yield ast.Slice(lower=l, upper=u, step=s)
@out_of_core
@trace
def make_expr_context(self, d):
    """Yield expression-context nodes (Load/Store/Del) by dispatching to
    each leaf factory via self.choice. No depth guard: leaves recurse no
    further."""
    choices = [
        self.make_Load,
        self.make_Store,
        self.make_Del,
    ]
    for c in self.choice(choices):
        assert not isinstance(c, GeneratorType)
        yield from c(d)
@out_of_core
@trace
def make_Load(self, d):
    """Yield the single ast.Load context node (leaf: no depth guard)."""
    yield ast.Load()
@out_of_core
@trace
def make_Store(self, d):
    """Yield the single ast.Store context node (leaf: no depth guard)."""
    yield ast.Store()
@out_of_core
@trace
def make_Del(self, d):
    """Yield the single ast.Del context node (leaf: no depth guard)."""
    yield ast.Del()
@out_of_core
@trace
def make_boolop(self, d):
    """Yield boolean-operator nodes (And/Or) by dispatching to each leaf
    factory via self.choice. No depth guard: leaves recurse no further."""
    choices = [
        self.make_And,
        self.make_Or,
    ]
    for c in self.choice(choices):
        assert not isinstance(c, GeneratorType)
        yield from c(d)
@out_of_core
@trace
def make_And(self, d):
    """Yield the single ast.And operator node (leaf: no depth guard)."""
    yield ast.And()
@out_of_core
@trace
def make_Or(self, d):
    """Yield the single ast.Or operator node (leaf: no depth guard)."""
    yield ast.Or()
@out_of_core
@trace
def make_operator(self, d):
    """Yield binary-operator nodes (Add..FloorDiv) by dispatching to each
    leaf factory via self.choice. No depth guard: leaves recurse no
    further."""
    choices = [
        self.make_Add,
        self.make_Sub,
        self.make_Mult,
        self.make_MatMult,
        self.make_Div,
        self.make_Mod,
        self.make_Pow,
        self.make_LShift,
        self.make_RShift,
        self.make_BitOr,
        self.make_BitXor,
        self.make_BitAnd,
        self.make_FloorDiv,
    ]
    for c in self.choice(choices):
        assert not isinstance(c, GeneratorType)
        yield from c(d)
@out_of_core
@trace
def make_Add(self, d):
    """Yield the single ast.Add operator node (leaf: no depth guard)."""
    yield ast.Add()
@out_of_core
@trace
def make_Sub(self, d):
    """Yield the single ast.Sub operator node (leaf: no depth guard)."""
    yield ast.Sub()
@out_of_core
@trace
def make_Mult(self, d):
    """Yield the single ast.Mult operator node (leaf: no depth guard)."""
    yield ast.Mult()
@out_of_core
@trace
def make_MatMult(self, d):
    """Yield the single ast.MatMult operator node (leaf: no depth guard)."""
    yield ast.MatMult()
@out_of_core
@trace
def make_Div(self, d):
    """Yield the single ast.Div operator node (leaf: no depth guard)."""
    yield ast.Div()
@out_of_core
@trace
def make_Mod(self, d):
    """Yield the single ast.Mod operator node (leaf: no depth guard)."""
    yield ast.Mod()
@out_of_core
@trace
def make_Pow(self, d):
    """Yield the single ast.Pow operator node (leaf: no depth guard)."""
    yield ast.Pow()
@out_of_core
@trace
def make_LShift(self, d):
    """Yield the single ast.LShift operator node (leaf: no depth guard)."""
    yield ast.LShift()
@out_of_core
@trace
def make_RShift(self, d):
    """Yield the single ast.RShift operator node (leaf: no depth guard)."""
    yield ast.RShift()
@out_of_core
@trace
def make_BitOr(self, d):
    """Yield the single ast.BitOr operator node (leaf: no depth guard)."""
    yield ast.BitOr()
@out_of_core
@trace
def make_BitXor(self, d):
    """Yield the single ast.BitXor operator node (leaf: no depth guard)."""
    yield ast.BitXor()
@out_of_core
@trace
def make_BitAnd(self, d):
    """Yield the single ast.BitAnd operator node (leaf: no depth guard)."""
    yield ast.BitAnd()
@out_of_core
@trace
def make_FloorDiv(self, d):
    """Yield the single ast.FloorDiv operator node (leaf: no depth guard)."""
    yield ast.FloorDiv()
@out_of_core
@trace
def make_unaryop(self, d):
    """Yield unary-operator nodes (Invert/Not/UAdd/USub) by dispatching
    to each leaf factory via self.choice. No depth guard: leaves recurse
    no further."""
    choices = [
        self.make_Invert,
        self.make_Not,
        self.make_UAdd,
        self.make_USub,
    ]
    for c in self.choice(choices):
        assert not isinstance(c, GeneratorType)
        yield from c(d)
@out_of_core
@trace
def make_Invert(self, d):
    """Yield the single ast.Invert operator node (leaf: no depth guard)."""
    yield ast.Invert()
@out_of_core
@trace
def make_Not(self, d):
    """Yield the single ast.Not operator node (leaf: no depth guard)."""
    yield ast.Not()
@out_of_core
@trace
def make_UAdd(self, d):
    """Yield the single ast.UAdd operator node (leaf: no depth guard)."""
    yield ast.UAdd()
@out_of_core
@trace
def make_USub(self, d):
    """Yield the single ast.USub operator node (leaf: no depth guard)."""
    yield ast.USub()
@out_of_core
@trace
def make_cmpop(self, d):
    """Yield comparison-operator nodes (Eq..NotIn) by dispatching to each
    leaf factory via self.choice. No depth guard: leaves recurse no
    further."""
    choices = [
        self.make_Eq,
        self.make_NotEq,
        self.make_Lt,
        self.make_LtE,
        self.make_Gt,
        self.make_GtE,
        self.make_Is,
        self.make_IsNot,
        self.make_In,
        self.make_NotIn,
    ]
    for c in self.choice(choices):
        assert not isinstance(c, GeneratorType)
        yield from c(d)
@out_of_core
@trace
def make_Eq(self, d):
    """Yield the single ast.Eq comparison node (leaf: no depth guard)."""
    yield ast.Eq()
@out_of_core
@trace
def make_NotEq(self, d):
    """Yield the single ast.NotEq comparison node (leaf: no depth guard)."""
    yield ast.NotEq()
@out_of_core
@trace
def make_Lt(self, d):
    """Yield the single ast.Lt comparison node (leaf: no depth guard)."""
    yield ast.Lt()
@out_of_core
@trace
def make_LtE(self, d):
    """Yield the single ast.LtE comparison node (leaf: no depth guard)."""
    yield ast.LtE()
@out_of_core
@trace
def make_Gt(self, d):
    """Yield the single ast.Gt comparison node (leaf: no depth guard)."""
    yield ast.Gt()
@out_of_core
@trace
def make_GtE(self, d):
    """Yield the single ast.GtE comparison node (leaf: no depth guard)."""
    yield ast.GtE()
@out_of_core
@trace
def make_Is(self, d):
    """Yield the single ast.Is comparison node (leaf: no depth guard)."""
    yield ast.Is()
@out_of_core
@trace
def make_IsNot(self, d):
    """Yield the single ast.IsNot comparison node (leaf: no depth guard)."""
    yield ast.IsNot()
@out_of_core
@trace
def make_In(self, d):
    """Yield the single ast.In comparison node (leaf: no depth guard)."""
    yield ast.In()
@out_of_core
@trace
def make_NotIn(self, d):
    """Yield the single ast.NotIn comparison node (leaf: no depth guard)."""
    yield ast.NotIn()
@out_of_core
@trace
def make_comprehension(self, d):
    """Yield ast.comprehension clauses for every combination of target,
    iter, ifs and is_async producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    # NOTE(review): is_async should be 0 or 1 -- presumably make_int
    # covers these; confirm.
    target = partial(self.make_expr, d)
    iter_ = partial(self.make_expr, d)
    ifs = partial(self.make_star, self.make_expr, d)
    is_async = partial(self.make_int, d)
    for t, it, i, a in product(target, iter_, ifs, is_async):
        assert not isinstance(t, GeneratorType)
        assert not isinstance(it, GeneratorType)
        assert not isinstance(i, GeneratorType)
        assert not isinstance(a, GeneratorType)
        yield ast.comprehension(target=t, iter=it, ifs=i, is_async=a)
@out_of_core
@trace
def make_excepthandler(self, d):
    """Yield except-handler nodes for every combination of (optional)
    type, (optional) name and body producible within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    type_ = partial(self.make_optional, self.make_expr, d)
    name = partial(self.make_optional, self.make_identifier, d)
    body = partial(self.make_star, self.make_stmt, d)
    for t, n, b in product(type_, name, body):
        assert not isinstance(t, GeneratorType)
        assert not isinstance(n, GeneratorType)
        assert not isinstance(b, GeneratorType)
        # BUG FIX: ast.excepthandler is the abstract sum type in the ASDL
        # grammar; the concrete constructor that carries type/name/body is
        # ast.ExceptHandler. compile()/ast.unparse() reject the base type.
        yield ast.ExceptHandler(type=t, name=n, body=list(b))
@out_of_core
@trace
def make_arguments(self, d):
    """Yield ast.arguments nodes for every combination of posonlyargs,
    args, vararg, kwonlyargs, kw_defaults, kwarg and defaults producible
    within the depth budget."""
    if d == self.max_rd: return  # recursion budget exhausted: yield nothing
    d += 1
    # partial-wrapped factories let `product` restart each child generator.
    # NOTE(review): defaults/kw_defaults are enumerated independently of
    # the arg lists, so invalid length pairings are possible -- confirm
    # consumers tolerate this.
    posonlyargs = partial(self.make_star, self.make_arg, d)
    args = partial(self.make_star, self.make_arg, d)
    vararg = partial(self.make_optional, self.make_arg, d)
    kwonlyargs = partial(self.make_star, self.make_arg, d)
    kw_defaults = partial(self.make_star, self.make_expr, d)
    kwarg = partial(self.make_optional, self.make_arg, d)
    defaults = partial(self.make_star, self.make_expr, d)
    for p, a, v, kwo, kwd, kwa, df in product(posonlyargs, args, vararg, kwonlyargs, kw_defaults, kwarg, defaults):
        assert not isinstance(p, GeneratorType)
        assert not isinstance(a, GeneratorType)
        assert not isinstance(v, GeneratorType)
        assert not isinstance(kwo, GeneratorType)
        assert not isinstance(kwd, GeneratorType)
        assert not isinstance(kwa, GeneratorType)
        assert not isinstance(df, GeneratorType)
        yield ast.arguments(posonlyargs=p, args=a, vararg=v, kwonlyargs=kwo, kw_defaults=kwd, kwarg=kwa, defaults=df)
@out_of_core
@trace
def make_arg(self, d):
    """Yield candidate ``ast.arg`` nodes (a single parameter)."""
    if d == self.max_rd:
        return
    d += 1
    field_makers = (
        partial(self.make_identifier, d),                  # arg
        partial(self.make_optional, self.make_expr, d),    # annotation
        partial(self.make_optional, self.make_string, d),  # type_comment
    )
    for arg_name, annotation, type_comment in product(*field_makers):
        for produced in (arg_name, annotation, type_comment):
            assert not isinstance(produced, GeneratorType)
        yield ast.arg(arg=arg_name, annotation=annotation, type_comment=type_comment)
@out_of_core
@trace
def make_keyword(self, d):
    """Yield candidate ``ast.keyword`` nodes (keyword argument in a call)."""
    if d == self.max_rd:
        return
    d += 1
    name_maker = partial(self.make_optional, self.make_identifier, d)
    value_maker = partial(self.make_expr, d)
    for kw_name, kw_value in product(name_maker, value_maker):
        assert not isinstance(kw_name, GeneratorType)
        assert not isinstance(kw_value, GeneratorType)
        yield ast.keyword(arg=kw_name, value=kw_value)
@out_of_core
@trace
def make_alias(self, d):
    """Yield candidate ``ast.alias`` nodes (import name with optional as-name)."""
    if d == self.max_rd:
        return
    d += 1
    name_maker = partial(self.make_identifier, d)
    asname_maker = partial(self.make_optional, self.make_identifier, d)
    for alias_name, as_name in product(name_maker, asname_maker):
        assert not isinstance(alias_name, GeneratorType)
        assert not isinstance(as_name, GeneratorType)
        yield ast.alias(name=alias_name, asname=as_name)
@out_of_core
@trace
def make_withitem(self, d):
    """Yield candidate ``ast.withitem`` nodes (one item of a with statement)."""
    if d == self.max_rd:
        return
    d += 1
    ctx_maker = partial(self.make_expr, d)
    vars_maker = partial(self.make_optional, self.make_expr, d)
    for ctx_expr, opt_vars in product(ctx_maker, vars_maker):
        assert not isinstance(ctx_expr, GeneratorType)
        assert not isinstance(opt_vars, GeneratorType)
        yield ast.withitem(context_expr=ctx_expr, optional_vars=opt_vars)
@out_of_core
@trace
def make_match_case(self, d):
    """Yield candidate ``ast.match_case`` nodes (one case of a match statement)."""
    if d == self.max_rd:
        return
    d += 1
    field_makers = (
        partial(self.make_pattern, d),                   # pattern
        partial(self.make_optional, self.make_expr, d),  # guard
        partial(self.make_star, self.make_stmt, d),      # body
    )
    for pat, guard, stmts in product(*field_makers):
        for produced in (pat, guard, stmts):
            assert not isinstance(produced, GeneratorType)
        yield ast.match_case(pattern=pat, guard=guard, body=list(stmts))
@out_of_core
@trace
def make_pattern(self, d):
    """Yield candidate match patterns by delegating to one pattern maker at a time.

    No depth check here: the individual constructors each enforce the budget.
    """
    constructors = [
        self.make_MatchValue,
        self.make_MatchSingleton,
        self.make_MatchSequence,
        self.make_MatchMapping,
        self.make_MatchClass,
        self.make_MatchStar,
        self.make_MatchAs,
        self.make_MatchOr,
    ]
    for constructor in self.choice(constructors):
        assert not isinstance(constructor, GeneratorType)
        yield from constructor(d)
@out_of_core
@trace
def make_MatchValue(self, d):
    """Yield candidate ``ast.MatchValue`` nodes (literal match pattern)."""
    if d == self.max_rd:
        return
    for value in self.make_expr(d + 1):
        assert not isinstance(value, GeneratorType)
        yield ast.MatchValue(value=value)
@out_of_core
@trace
def make_MatchSingleton(self, d):
    """Yield candidate ``ast.MatchSingleton`` nodes.

    NOTE(review): the ast grammar restricts MatchSingleton values to
    True/False/None, while make_constant also yields ints/floats/strings —
    confirm whether producing invalid singletons is intended (e.g. for
    negative testing).
    """
    if d == self.max_rd:
        return
    for value in self.make_constant(d + 1):
        assert not isinstance(value, GeneratorType)
        yield ast.MatchSingleton(value=value)
@out_of_core
@trace
def make_MatchSequence(self, d):
    """Yield candidate ``ast.MatchSequence`` nodes (sequence patterns)."""
    if d == self.max_rd:
        return
    for pattern_group in self.make_star(self.make_pattern, d + 1):
        assert not isinstance(pattern_group, GeneratorType)
        yield ast.MatchSequence(patterns=pattern_group)
@out_of_core
@trace
def make_MatchMapping(self, d):
    """Yield candidate ``ast.MatchMapping`` nodes (mapping patterns)."""
    if d == self.max_rd:
        return
    d += 1
    field_makers = (
        partial(self.make_star, self.make_expr, d),           # keys
        partial(self.make_star, self.make_pattern, d),        # patterns
        partial(self.make_optional, self.make_identifier, d), # rest
    )
    for key_group, pattern_group, rest_name in product(*field_makers):
        for produced in (key_group, pattern_group, rest_name):
            assert not isinstance(produced, GeneratorType)
        yield ast.MatchMapping(keys=key_group, patterns=pattern_group, rest=rest_name)
@out_of_core
@trace
def make_MatchClass(self, d):
    """Yield candidate ``ast.MatchClass`` nodes (class patterns)."""
    if d == self.max_rd:
        return
    d += 1
    field_makers = (
        partial(self.make_expr, d),                       # cls
        partial(self.make_star, self.make_pattern, d),    # patterns
        partial(self.make_star, self.make_identifier, d), # kwd_attrs
        partial(self.make_star, self.make_pattern, d),    # kwd_patterns
    )
    for cls_expr, pos_pats, attr_names, attr_pats in product(*field_makers):
        for produced in (cls_expr, pos_pats, attr_names, attr_pats):
            assert not isinstance(produced, GeneratorType)
        yield ast.MatchClass(cls=cls_expr, patterns=pos_pats,
                             kwd_attrs=attr_names, kwd_patterns=attr_pats)
@out_of_core
@trace
def make_MatchStar(self, d):
    """Yield candidate ``ast.MatchStar`` nodes (``*rest`` in a sequence pattern)."""
    if d == self.max_rd:
        return
    for ident in self.make_optional(self.make_identifier, d + 1):
        assert not isinstance(ident, GeneratorType)
        yield ast.MatchStar(name=ident)
@out_of_core
@trace
def make_MatchAs(self, d):
    """Yield candidate ``ast.MatchAs`` nodes (capture / as patterns)."""
    if d == self.max_rd:
        return
    d += 1
    pattern_maker = partial(self.make_optional, self.make_pattern, d)
    name_maker = partial(self.make_optional, self.make_identifier, d)
    for sub_pattern, bind_name in product(pattern_maker, name_maker):
        assert not isinstance(sub_pattern, GeneratorType)
        assert not isinstance(bind_name, GeneratorType)
        yield ast.MatchAs(pattern=sub_pattern, name=bind_name)
@out_of_core
@trace
def make_MatchOr(self, d):
    """Yield candidate ``ast.MatchOr`` nodes (``p1 | p2`` patterns)."""
    if d == self.max_rd:
        return
    for pattern_group in self.make_star(self.make_pattern, d + 1):
        assert not isinstance(pattern_group, GeneratorType)
        yield ast.MatchOr(patterns=pattern_group)
@out_of_core
@trace
def make_type_ignore(self, d):
    """Yield candidate ``ast.TypeIgnore`` nodes (``# type: ignore`` records)."""
    if d == self.max_rd:
        return
    d += 1
    lineno_maker = partial(self.make_int, d)
    tag_maker = partial(self.make_string, d)
    for line_no, tag_text in product(lineno_maker, tag_maker):
        assert not isinstance(line_no, GeneratorType)
        assert not isinstance(tag_text, GeneratorType)
        yield ast.TypeIgnore(lineno=line_no, tag=tag_text)
@out_of_core
@trace
def make_int(self, d):
    """Yield a single random integer in [-10, 10).

    The depth budget is currently ignored (TODO in the original).
    """
    yield randrange(-10, 10)
@out_of_core
@trace
def make_string(self, d):
    """Yield one random string of length 0-9 drawn from letters, punctuation and digits.

    The depth budget is currently ignored (TODO in the original).
    """
    length = randrange(10)
    alphabet = ascii_letters + punctuation + digits
    yield ''.join(choice(alphabet) for _ in range(length))
@out_of_core
@trace
def make_identifier(self, d):
    """Yield one random valid identifier: a letter followed by 0-9 alphanumerics.

    TODO (from the original): identifier scope; declare vs. reference.
    The random calls are made in the same order as the original
    (length, then head letter, then tail characters).
    """
    tail_length = randrange(10)
    head = choice(ascii_letters)
    tail_alphabet = ascii_letters + digits
    tail = ''.join(choice(tail_alphabet) for _ in range(tail_length))
    yield head + tail
@out_of_core
@trace
def make_constant(self, d):
    """Yield constants by delegating to one leaf maker (int/float/string/bool).

    Complex constants are not generated yet.
    """
    constructors = [
        self.make_int,
        self.make_float,
        self.make_string,
        self.make_boolean,
    ]
    for constructor in self.choice(constructors):
        assert not isinstance(constructor, GeneratorType)
        yield from constructor(d)
@out_of_core
@trace
def make_float(self, d):
    """Yield a single uniform random float in [0.0, 1.0)."""
    yield random()
#def make_complex(self, d): pass
@out_of_core
@trace
def make_boolean(self, d):
    """Yield a single random boolean."""
    yield bool(getrandbits(1))
if __name__ == '__main__':
    # Drive the generator at increasing recursion depths and show each
    # generated AST, its compiled form and any error.
    for depth in range(10):
        print("rd: %s" % (depth,))
        generator = CG(depth)
        for node, code, err in generator.exec_module():
            print("a: %s\nc: %s\ne: %s\n" % (ast.dump(node), code, err))
        print()
|
import currency
from datetime import datetime
from django import template
register = template.Library()
@register.filter
def stripe_amount(amount, symbol):
    """Render a Stripe integer amount (minor units) as a pretty currency string."""
    minor_units = pow(10, currency.decimals(symbol))
    major_amount = amount / minor_units
    return currency.pretty(major_amount, symbol)
@register.filter(name='fromunix')
def fromunix(value):
    """Convert a unix timestamp (int or numeric string) to a datetime.

    NOTE(review): fromtimestamp uses the server's local timezone — confirm
    that is the intended behaviour for templates.
    """
    timestamp = int(value)
    return datetime.fromtimestamp(timestamp)
|
"""
No precedence between + and *. Whichever comes first, gets evaluated first.
"""
import re
def main():
    """Sum the result of evaluating every expression in the puzzle input."""
    input_file = "../puzzle_input.txt"  # 209335026987
    # input_file = "../test_input1.txt"  # 2245406496
    # input_file = "../test_input2.txt"  # 51
    # input_file = "../test_input3.txt"  # 26335
    running_total = 0
    with open(input_file, "r") as handle:
        for line in handle:
            running_total += parse(line)
    print(f"Total: {running_total}")
class Num(int):
    """int subclass that gives ``+`` and ``*`` equal precedence.

    ``+`` in the source expression is rewritten to ``//`` (which has the
    same precedence as ``*`` in Python), and ``__floordiv__`` is overridden
    to perform addition.  ``__mul__`` is overridden only so that products
    stay ``Num`` instances and keep the special behaviour in chained
    expressions.
    """

    def __floordiv__(self, other):
        # `a // b` now means `a + b`.
        return Num(super().__add__(other))

    def __mul__(self, other):
        return Num(super().__mul__(other))

    @classmethod
    def evaluate(cls, expr):
        """Evaluate *expr* left-to-right, ignoring +/* precedence.

        The original also did a no-op ``.replace("*", "*")``, removed here.
        """
        # Rewrite + as // so both operators share one precedence level.
        rewritten = expr.replace("+", "//")
        # Wrap every integer literal in the class so the overridden
        # operators are used throughout the expression.
        rewritten = re.sub(r"(\d+)", r"{:s}(\1)".format(cls.__name__), rewritten)
        # NOTE: eval is acceptable only because the input is a trusted,
        # locally-stored puzzle file — never feed it untrusted text.
        return eval(rewritten)
def parse(expr):
    """Evaluate one puzzle expression with equal +/* precedence."""
    return Num.evaluate(expr)


if __name__ == "__main__":
    main()
|
# stdlib
import re
import sys
from typing import Any
from typing import List
from typing import Tuple
from typing import Union
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import VerifyKey
# syft relative
from ... import serialize
from ...proto.core.node.common.action.action_pb2 import Action as Action_PB
from ...proto.core.plan.plan_pb2 import Plan as Plan_PB
from ..common.object import Serializable
from ..common.serde.serializable import bind_protobuf
from ..node.abstract.node import AbstractNode
from ..node.common.action.common import Action
from ..node.common.util import listify
from ..pointer.pointer import Pointer
CAMEL_TO_SNAKE_PAT = re.compile(r"(?<!^)(?=[A-Z])")
@bind_protobuf
class Plan(Serializable):
    """
    A plan is a collection of actions, plus some variable inputs, that together form a computation graph.
    Attributes:
        actions: list of actions
        inputs: Pointers to the inputs. Defaults to None.
    """
    def __init__(
        self, actions: List[Action], inputs: Union[Pointer, List[Pointer], None] = None
    ):
        self.actions = actions
        # listify() normalises None / a single Pointer into a list of Pointers.
        self.inputs: List[Pointer] = listify(inputs)
    def __call__(
        self, node: AbstractNode, verify_key: VerifyKey, *args: Tuple[Any]
    ) -> None:
        """
        1) For all pointers that were passed into the init as `inputs`, this method
        replaces those pointers in self.actions by the pointers passed in as *args.
        2) Executes the actions in self.actions one by one
        *While this function requires `node` and `verify_key` as inputs, during remote
        execution, passing these is handled in `RunClassMethodAction`*
        *Note that this method will receive *args as pointers during execution. Normally,
        pointers are resolved during `RunClassMethodAction.execute()`, but not for plans,
        as they need to operate on the pointer to enable remapping of the inputs.*
        Args:
            *args: the new inputs for the plan, passed as pointers
        """
        inputs = listify(args)
        # this is pretty cumbersome, we are searching through all actions to check
        # if we need to redefine some of their attributes that are inputs in the
        # graph of actions
        for i, (current_input, new_input) in enumerate(zip(self.inputs, inputs)):
            # only actions that expose remap_input participate in remapping
            for a in self.actions:
                if hasattr(a, "remap_input"):
                    a.remap_input(current_input, new_input)  # type: ignore
            # redefine the inputs of the plan
            self.inputs[i] = new_input
        # execute the (now remapped) actions in their original order
        for a in self.actions:
            a.execute_action(node, verify_key)
    @staticmethod
    def get_protobuf_schema() -> GeneratedProtocolMessageType:
        """Return the type of protobuf object which stores a class of this type
        As a part of serialization and deserialization, we need the ability to
        lookup the protobuf object type directly from the object type. This
        static method allows us to do this.
        Importantly, this method is also used to create the reverse lookup ability within
        the metaclass of Serializable. In the metaclass, it calls this method and then
        it takes whatever type is returned from this method and adds an attribute to it
        with the type of this class attached to it. See the MetaSerializable class for details.
        :return: the type of protobuf object which corresponds to this class.
        :rtype: GeneratedProtocolMessageType
        """
        return Plan_PB
    def _object2proto(self) -> Plan_PB:
        """Returns a protobuf serialization of self.
        As a requirement of all objects which inherit from Serializable,
        this method transforms the current object into the corresponding
        Protobuf object so that it can be further serialized.
        :return: returns a protobuf object
        :rtype: ObjectWithID_PB
        .. note::
            This method is purely an internal method. Please use object.serialize() or one of
            the other public serialization methods if you wish to serialize an
            object.
        """
        # e.g. "RunClassMethodAction" -> "run_class_method_action"
        def camel_to_snake(s: str) -> str:
            return CAMEL_TO_SNAKE_PAT.sub("_", s).lower()
        # Each action is wrapped in the generic Action_PB envelope; the oneof
        # field name is the snake_case form of the concrete action class name.
        actions_pb = [
            Action_PB(
                obj_type=".".join([action.__module__, action.__class__.__name__]),
                **{camel_to_snake(action.__class__.__name__): serialize(action)}
            )
            for action in self.actions
        ]
        inputs_pb = [inp._object2proto() for inp in self.inputs]
        return Plan_PB(actions=actions_pb, inputs=inputs_pb)
    @staticmethod
    def _proto2object(proto: Plan_PB) -> "Plan":
        """Creates a ObjectWithID from a protobuf
        As a requirement of all objects which inherit from Serializable,
        this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of Plan
        :rtype: Plan
        .. note::
            This method is purely an internal method. Please use syft.deserialize()
            if you wish to deserialize an object.
        """
        actions = []
        for action_proto in proto.actions:
            # obj_type holds "module.path.ClassName"; resolve the class object
            module, cls_name = action_proto.obj_type.rsplit(".", 1)
            action_cls = getattr(sys.modules[module], cls_name)
            # protobuf does no inheritance, so we wrap action subclasses
            # in the main action class.
            inner_action = getattr(action_proto, action_proto.WhichOneof("action"))
            actions.append(action_cls._proto2object(inner_action))
        inputs = [
            Pointer._proto2object(pointer_proto) for pointer_proto in proto.inputs
        ]
        return Plan(actions=actions, inputs=inputs)
|
from django.db import models
class Record(models.Model):
    """A record optionally linked to an account and to an owning user."""

    # Bug fix: ForeignKey.on_delete is a required argument on Django >= 2.0;
    # omitting it raises TypeError at model definition time.  CASCADE
    # reproduces the implicit default of older Django versions.
    account = models.ForeignKey('accounts.Account', blank=True, null=True,
                                on_delete=models.CASCADE)
    owner = models.ForeignKey('users.User', blank=True, null=True,
                              on_delete=models.CASCADE)
|
# system
import sys
# lib
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
# self
import data.ibc.treeUtil as treeUtil
sys.modules['treeUtil'] = treeUtil
from data.ibc.data import get_ibc_data
from data.twitter.data import get_congressional_twitter_data
from preprocessing.preprocess import clean_text_documents
def ibc_nlp_classification(validation_split=0.1,
                           shuffle=True):
    """
    fit a series of general nlp classifiers on the
    IBC annotated corpus to compare performance to
    deep learning models
    :param validation_split: the fraction of data to keep
        for validation
    :param shuffle: whether or not the shuffle the data
    :return: Nothing. results are logged (printed)
    """
    print('>> gathering data \n')
    X, Y = get_ibc_data(use_subsampling=True)
    X = clean_text_documents(X)
    X = np.array(X)
    Y = np.array(Y)
    split = int(validation_split * len(X))
    if shuffle:
        # one shared permutation keeps X and Y aligned
        p = np.random.permutation(len(X))
        X = X[p]
        Y = Y[p]
    X_train, X_test = X[split:], X[:split]
    Y_train, Y_test = Y[split:], Y[:split]

    def run_pipeline(pipes):
        # Fit one sklearn Pipeline and report train/validation accuracy.
        # Bug fix: np.mean(...) yields a fraction in [0, 1], but the message
        # is labelled as a percentage — scale by 100 before printing.
        text_clf = Pipeline(pipes)
        text_clf.fit(X_train, Y_train)
        predicted = text_clf.predict(X_train)
        print('classifier got [ {} ]% accuracy on training data'.format(100 * np.mean(predicted == Y_train)))
        predicted = text_clf.predict(X_test)
        print('classifier got [ {} ]% accuracy on validation data'.format(100 * np.mean(predicted == Y_test)))

    print('>>> fitting classifiers')
    # SGD Classification
    print('>> SGD Linear Model:')
    run_pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', SGDClassifier(loss='hinge', penalty='l2',
                              alpha=1e-3, random_state=42,
                              max_iter=5, tol=None))
    ])
    # Multinomial Naive Bayes Classification
    print('>> Multinomial Naive Bayes Classifier:')
    run_pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', MultinomialNB())
    ])
def main():
    """
    Run the baseline NLP classification tasks on the gathered data,
    to gauge how hard classifying the sentiment of political text is.
    :return: None.
    """
    ibc_nlp_classification()


if __name__ == '__main__':
    main()
|
# Generated by Django 4.0 on 2022-03-13 23:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines Publicacao.link as an optional
    # URLField (blank=True, default max_length).

    dependencies = [
        ('paginas', '0015_publicacao_link'),
    ]
    operations = [
        migrations.AlterField(
            model_name='publicacao',
            name='link',
            field=models.URLField(blank=True, verbose_name='Link'),
        ),
    ]
|
#######################################################################
##### #####
##### Jeferson S. Pazze #####
##### jeferson.pazze@acad.pucrs.br #####
##### 01/16/2019 #####
##### LABIO - PUCRS #####
##### Histogram #####
##### #####
#######################################################################
# import the necessary packages
import numpy as np
import matplotlib.pyplot as plt
import imutils
import argparse
######################### histogram ####################
# Parse CLI options for the live-histogram display.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='Path to video file (if not using camera)')
# Bug fix: the help text used to claim "gray" was the default, but the
# actual default below is 'rgb'.
parser.add_argument('-c', '--color', type=str, default='rgb', help='Color space: "rgb" (default) or "gray"')
parser.add_argument('-b', '--bins', type=int, default=16, help='Number of bins per channel (default 16)')
parser.add_argument('-w', '--width', type=int, default=0, help='Resize video to specified width in pixels (maintains aspect)')
args = vars(parser.parse_args())
# Configure VideoCapture class instance for using camera or file input.
color = args['color']
bins = args['bins']
resizeWidth = args['width']
# Initialize plot.
fig, ax = plt.subplots()
if color == 'rgb':
    ax.set_title('Histogram (RGB)')
else:
    ax.set_title('Histogram (grayscale)')
ax.set_xlabel('Bin')
ax.set_ylabel('Frequency')
# Initialize plot line object(s). Turn on interactive plotting and show plot.
lw = 3
alpha = 0.5
if color == 'rgb':
    # one line per channel, drawn semi-transparent so overlaps are visible
    lineR, = ax.plot(np.arange(bins), np.zeros((bins,)), c='r', lw=lw, alpha=alpha)
    lineG, = ax.plot(np.arange(bins), np.zeros((bins,)), c='g', lw=lw, alpha=alpha)
    lineB, = ax.plot(np.arange(bins), np.zeros((bins,)), c='b', lw=lw, alpha=alpha)
else:
    lineGray, = ax.plot(np.arange(bins), np.zeros((bins,1)), c='k', lw=lw)
ax.set_xlim(0, bins-1)
ax.set_ylim(0, 1)
plt.ion()
plt.show()
######################### end histogram ####################
print('histogram inicialized')
|
"""
-----------------------------------------------------------------------------------------------------------
Package: AequilibraE
Name: Main interface for adding centroid connectors
Purpose: Load GUI and user interface for the centroid addition procedure
Original Author: Pedro Camargo (c@margo.co)
Contributors:
Last edited by: Pedro Camargo
Website: www.AequilibraE.com
Repository: https://github.com/AequilibraE/AequilibraE
Created: 2016-07-30
Updated: 2020-01-30
Copyright: (c) AequilibraE authors
Licence: See LICENSE.TXT
-----------------------------------------------------------------------------------------------------------
"""
import qgis
from qgis.gui import QgsMapLayerComboBox, QgsFieldComboBox
from qgis.core import *
from qgis.PyQt.QtCore import *
from qgis.PyQt import QtWidgets, uic
from qgis.PyQt.QtWidgets import *
from ..common_tools.auxiliary_functions import *
import sys
import os
from ..common_tools.global_parameters import *
from .adds_connectors_procedure import AddsConnectorsProcedure
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), "../common_tools/forms/ui_empty.ui"))
class AddConnectorsDialog(QtWidgets.QDialog, FORM_CLASS):
    """Dialog that collects centroid-connector parameters and launches the
    AddsConnectorsProcedure worker thread.

    Fix applied: the local variable holding the link-type panel used to be
    named ``list``, shadowing the builtin — renamed to ``types_panel``.
    """

    def __init__(self, iface, project):
        QtWidgets.QDialog.__init__(self)
        self.iface = iface
        self.setupUi(self)
        self.NewLinks = False
        self.NewNodes = False
        self.project = project
        if project is not None:
            self.conn = project.conn
            self.path_to_file = project.path_to_file
        self._run_layout = QGridLayout()
        spacer = QSpacerItem(5, 5, QSizePolicy.Expanding, QSizePolicy.Minimum)
        # Centroid layer selector
        frm1 = QHBoxLayout()
        frm1.addItem(spacer)
        self.CentroidLayer = QgsMapLayerComboBox()
        self.CentroidLayer.layerChanged.connect(self.set_fields)
        clabel = QLabel()
        clabel.setText("Centroids layer")
        frm1.addWidget(clabel)
        frm1.addWidget(self.CentroidLayer)
        self.CentroidLayer.setMinimumSize(450, 30)
        wdgt1 = QWidget()
        wdgt1.setLayout(frm1)
        # Centroid ID field selector
        self.CentroidField = QgsFieldComboBox()
        self.CentroidField.setMinimumSize(450, 30)
        frm2 = QHBoxLayout()
        frm2.addItem(spacer)
        flabel = QLabel()
        flabel.setText("Centroid ID field")
        frm2.addWidget(flabel)
        frm2.addWidget(self.CentroidField)
        wdgt2 = QWidget()
        wdgt2.setLayout(frm2)
        # only point layers make sense as centroid sources
        self.CentroidLayer.setFilters(QgsMapLayerProxyModel.PointLayer)
        # Connector length / count controls
        frm3 = QHBoxLayout()
        self.IfMaxLength = QCheckBox()
        self.IfMaxLength.setChecked(True)
        self.IfMaxLength.setText("Connector maximum length")
        self.IfMaxLength.toggled.connect(self.allows_distance)
        frm3.addWidget(self.IfMaxLength)
        frm3.addItem(spacer)
        self.MaxLength = QLineEdit()
        frm3.addWidget(self.MaxLength)
        frm3.addItem(spacer)
        lblmeters = QLabel()
        lblmeters.setText(" meters")
        frm3.addWidget(lblmeters)
        frm3.addItem(spacer)
        lblnmbr = QLabel()
        lblnmbr.setText("Connectors per centroid")
        frm3.addWidget(lblnmbr)
        self.NumberConnectors = QComboBox()
        for i in range(1, 40):
            self.NumberConnectors.addItem(str(i))
        frm3.addWidget(self.NumberConnectors)
        wdgt3 = QWidget()
        wdgt3.setLayout(frm3)
        layer_frame = QVBoxLayout()
        layer_frame.addWidget(wdgt1)
        layer_frame.addWidget(wdgt2)
        layer_frame.addWidget(wdgt3)
        lyrfrm = QWidget()
        lyrfrm.setLayout(layer_frame)
        # action buttons
        self.but_process = QPushButton()
        if self.project is None:
            self.but_process.setText("Project not loaded")
            self.but_process.setEnabled(False)
        else:
            self.but_process.setText("Run!")
            self.but_process.clicked.connect(self.run)
        self.but_cancel = QPushButton()
        self.but_cancel.setText("Cancel")
        self.but_cancel.clicked.connect(self.exit_procedure)
        self.progressbar = QProgressBar()
        self.progress_label = QLabel()
        self.progress_label.setText("...")
        but_frame = QHBoxLayout()
        but_frame.addWidget(self.progressbar, 1)
        but_frame.addWidget(self.progress_label, 1)
        but_frame.addWidget(self.but_cancel, 1)
        but_frame.addItem(spacer)
        but_frame.addWidget(self.but_process, 1)
        self.but_widget = QWidget()
        self.but_widget.setLayout(but_frame)
        # Progress bars and messagers
        self.progress_frame = QVBoxLayout()
        self.status_bar_files = QProgressBar()
        self.progress_frame.addWidget(self.status_bar_files)
        self.status_label_file = QLabel()
        self.status_label_file.setText("Extracting: ")
        self.progress_frame.addWidget(self.status_label_file)
        self.status_bar_chunks = QProgressBar()
        self.progress_frame.addWidget(self.status_bar_chunks)
        self.progress_widget = QWidget()
        self.progress_widget.setLayout(self.progress_frame)
        self.progress_widget.setVisible(False)
        self._run_layout.addWidget(lyrfrm)
        self._run_layout.addWidget(self.but_widget)
        self._run_layout.addWidget(self.progress_widget)
        # Link-type selection table (renamed from `list`, which shadowed the builtin)
        types_panel = QWidget()
        listLayout = QVBoxLayout()
        self.list_types = QTableWidget()
        self.list_types.setMinimumSize(180, 80)
        lbl = QLabel()
        lbl.setText("Allowed link types")
        listLayout.addWidget(lbl)
        listLayout.addWidget(self.list_types)
        types_panel.setLayout(listLayout)
        if self.project is not None:
            curr = self.conn.cursor()
            curr.execute("SELECT DISTINCT link_type FROM links ORDER BY link_type")
            ltypes = curr.fetchall()
            self.list_types.setRowCount(len(ltypes))
            self.list_types.setColumnCount(1)
            for i, lt in enumerate(ltypes):
                self.list_types.setItem(i, 0, QTableWidgetItem(lt[0]))
            # all link types are allowed by default
            self.list_types.selectAll()
        allStuff = QWidget()
        allStuff.setLayout(self._run_layout)
        allLayout = QHBoxLayout()
        allLayout.addWidget(allStuff)
        allLayout.addWidget(types_panel)
        self.setLayout(allLayout)
        self.resize(700, 135)
        # default directory
        self.path = standard_path()
        self.set_fields()
        self.IfMaxLength.setChecked(False)

    def allows_distance(self):
        """Enable the max-length box only while its checkbox is ticked."""
        self.MaxLength.setEnabled(False)
        if self.IfMaxLength.isChecked():
            self.MaxLength.setEnabled(True)

    def run_thread(self):
        """Wire the worker thread's signals to the progress widgets and start it."""
        self.worker_thread.ProgressValue.connect(self.progress_value_from_thread)
        self.worker_thread.ProgressText.connect(self.progress_text_from_thread)
        self.worker_thread.ProgressMaxValue.connect(self.progress_range_from_thread)
        self.worker_thread.jobFinished.connect(self.job_finished_from_thread)
        self.worker_thread.start()
        self.show()

    def progress_range_from_thread(self, val):
        self.progressbar.setRange(0, val)

    def progress_value_from_thread(self, value):
        self.progressbar.setValue(value)

    def progress_text_from_thread(self, value):
        self.progress_label.setText(value)

    def set_fields(self):
        """Point the field combo at the currently selected centroid layer."""
        self.CentroidField.setLayer(self.CentroidLayer.currentLayer())

    def job_finished_from_thread(self, success):
        """Re-enable the Run button, surface any worker error, and close."""
        self.but_process.setEnabled(True)
        if self.worker_thread.error is not None:
            qgis.utils.iface.messageBar().pushMessage("Error during procedure: ", self.worker_thread.error,
                                                      level=Qgis.Warning, duration=6)
        self.exit_procedure()

    def run(self):
        """Collect the dialog parameters and launch the connector procedure."""
        if self.MaxLength.isEnabled():
            max_length = float(self.MaxLength.text())
        else:
            # effectively "no limit"
            max_length = 1000000000000
        self.link_types = []
        for i in range(self.list_types.rowCount()):
            if self.list_types.item(i, 0).isSelected():
                self.link_types.append(self.list_types.item(i, 0).text())
        # If we selected all, we don't need to filter by it
        if len(self.link_types) == self.list_types.rowCount():
            self.link_types = []
        parameters = [self.project.path_to_file,
                      self.CentroidLayer.currentText(),
                      self.CentroidField.currentText(),
                      max_length,
                      int(self.NumberConnectors.currentText()),
                      self.link_types]
        self.but_process.setEnabled(False)
        self.worker_thread = AddsConnectorsProcedure(qgis.utils.iface.mainWindow(), *parameters)
        self.run_thread()

    def exit_procedure(self):
        self.close()
|
import requests
# from bs4 import BeautifulSoup
import re
import os
import pymysql
HEADER = {'User-Agent': 'Mozilla/5.0'}
SOURCE_INFO_URL = 'http://www.icourse163.org/dwr/call/plaincall/CourseBean.getMocTermDto.dwr'
SOURCE_RESOURCE_URL = 'http://www.icourse163.org/dwr/call/plaincall/CourseBean.getLessonUnitLearnVo.dwr'
class Course(object):
    """Holds basic information about an icourse163 course."""

    def __init__(self, *args, **kwargs):
        self.course_page_url = 'http://www.icourse163.org/learn/'

    def set_course(self, course):
        self.course = course

    def get_course_info(self):
        """Fetch the course page and parse the title, college and numeric id.

        The id is needed for the subsequent DWR POST requests.  Best-effort:
        if a pattern fails to match, the attributes are simply left unset.
        """
        try:
            course_page_url = self.course_page_url + self.course
            course_page = requests.get(course_page_url, headers=HEADER)
            id_pattern_compile = re.compile(r'id:(\d+),')
            # course name / college come from the description meta tag
            basicinfo_pattern_compile = re.compile(
                r'<meta name="description" .*?content=".*?,(.*?),(.*?),.*?/>')
            basic_set = re.search(basicinfo_pattern_compile, course_page.text)
            self.course_title = basic_set.group(1)
            self.course_collage = basic_set.group(2)
            self.course_id = re.search(id_pattern_compile,
                                       course_page.text).group(1)
        except AttributeError:
            # Bug fix: a failed re.search returns None, so .group raises
            # AttributeError.  The original caught ArithmeticError, which
            # none of these operations can raise, so parse failures escaped.
            pass
def get_course_all_source(course_id):
    '''
    Fetch every downloadable resource description for the course
    identified by the parsed course_id, write a table of contents to
    TOC.txt, and hand each video/PDF entry to get_content().
    '''
    # Ask the user which video resolution to download.
    video_level = select_video_level()
    # c0-param0: the course id
    # batchId: may be any timestamp
    # the remaining fields are fixed boilerplate for the DWR endpoint
    print(video_level + '---video_level')
    post_data = {
        'callCount': '1',
        'scriptSessionId': '${scriptSessionId}190',
        'c0-scriptName': 'CourseBean',
        'c0-methodName': 'getMocTermDto',
        'c0-id': '0',
        'c0-param0': 'number:' + course_id,
        'c0-param1': 'number:1',
        'c0-param2': 'boolean:true',
        'batchId': '1492167717772'
    }
    print(course_id + '---get_course_all_source')
    source_info = requests.post(
        SOURCE_INFO_URL, data=post_data, headers=HEADER)
    # Decode the response so the unicode-escaped (Chinese) text is readable.
    source_info_transcoding = source_info.text.encode('utf-8').decode(
        'unicode_escape')
    # Here the id is a first-level (chapter) id.
    chapter_pattern_compile = re.compile(
        r'homeworks=.*?;.+id=(\d+).*?name="(.*?)";')
    # Find all first-level chapter ids and names.
    chapter_set = re.findall(chapter_pattern_compile, source_info_transcoding)
    with open('TOC.txt', 'w', encoding='utf-8') as file:
        # Walk every chapter id/name pair and write it to the TOC.
        for index, single_chaper in enumerate(chapter_set):
            file.write('%s \n' % (single_chaper[1]))
            # Here the id is a second-level (lesson) id.
            lesson_pattern_compile = re.compile(
                r'chapterId=' + single_chaper[0] +
                r'.*?contentType=1.*?id=(\d+).+name="(.*?)".*?test')
            # Find all second-level lesson ids and names.
            lesson_set = re.findall(lesson_pattern_compile,
                                    source_info_transcoding)
            # Walk every lesson id/name pair and write it to the TOC.
            for sub_index, single_lesson in enumerate(lesson_set):
                file.write(' %s \n' % (single_lesson[1]))
                # Videos under this lesson: [contentid, contenttype, id, name]
                video_pattern_compile = re.compile(
                    r'contentId=(\d+).+contentType=(1).*?id=(\d+).*?lessonId='
                    + single_lesson[0] + r'.*?name="(.+)"')
                video_set = re.findall(video_pattern_compile,
                                       source_info_transcoding)
                # Documents under this lesson: [contentid, contenttype, id, name]
                pdf_pattern_compile = re.compile(
                    r'contentId=(\d+).+contentType=(3).+id=(\d+).+lessonId=' +
                    single_lesson[0] + r'.+name="(.+)"')
                pdf_set = re.findall(pdf_pattern_compile,
                                     source_info_transcoding)
                # Strips leading chapter/section numbering from resource names.
                name_pattern_compile = re.compile(
                    r'^[第一二三四五六七八九十\d]+[\s\d\._章课节讲]*[\.\s、]\s*\d*')
                # Walk the videos under this lesson: write TOC entry, download.
                count_num = 0
                for video_index, single_video in enumerate(video_set):
                    rename = re.sub(name_pattern_compile, '', single_video[3])
                    file.write(' [视频] %s \n' % (rename))
                    get_content(
                        course_id,
                        single_video, '%d.%d.%d [视频] %s' %
                        (index + 1, sub_index + 1, video_index + 1, rename),
                        video_level)
                    count_num += 1
                # Walk the PDFs under this lesson: write TOC entry, download.
                for pdf_index, single_pdf in enumerate(pdf_set):
                    rename = re.sub(name_pattern_compile, '', single_pdf[3])
                    file.write(' [文档] %s \n' % (rename))
                    get_content(
                        course_id,
                        single_pdf, '%d.%d.%d [文档] %s' %
                        (index + 1, sub_index + 1, pdf_index + 1 + count_num,
                         rename))
def get_content(course_id, single_content, name, *args):
    """Record one piece of course content (a video link or a PDF document).

    Parameters:
        course_id: id of the course the content belongs to (FK in the DB).
        single_content: tuple (content_id, content_type, resource_id, name)
            scraped from the course page; content_type '1' = video, '3' = pdf.
        name: display/file name for the content (sanitized below).
        *args: optional; args[0] is the video quality flag 'a'/'b'/'c'
            (standard/high/super definition).  Only consulted for videos.
    Side effects:
        - inserts rows into the `resource` plus `mp4`/`pdf` MySQL tables;
        - appends to Links.txt / Rename.bat (videos) or
          PDFLinks.txt / PDFName.txt (documents).
    """
    conn = pymysql.connect(host='127.0.0.1', port=3306, db='graduate',
                           user='root', password='13452078118')
    cusor = conn.cursor()
    course = Course()
    # SQL statements.
    # Insert a resource row (parent record for both videos and documents).
    sql1 = "insert into resource (course_id,resource_type,resource_name) value (%s,%s,%s)"
    # Insert a document (pdf) link.  NOTE(review): the original comments on
    # sql2/sql3 were swapped — sql2 targets the pdf table, sql3 the mp4 table.
    sql2 = "insert into pdf (resource_id,pdf_url) value (%s,%s)"
    # Insert a video (mp4) link.
    sql3 = "insert into mp4 (resource_id,mp4_url) value (%s,%s)"
    # Download/storage URL used by the video branch.
    url2 = ''
    '''
    如果是文档,则直接下载
    如果是视频,则保存链接供第三方下载
    '''
    # (The inert string above says: documents are downloaded directly; for
    # videos only the link is saved so a third-party tool can download it.)
    # Sanitize the file name: the site may contain characters that are
    # illegal in Windows file names.
    file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
    name = re.sub(file_pattern_compile, '', name)
    # Skip content that was already downloaded (a PDF with this name exists).
    if os.path.exists('PDFs\\' + name + '.pdf'):
        print(name + "------------->已下载")
        return
    # DWR-style POST payload understood by the course site.
    post_data = {
        'callCount': '1',
        'scriptSessionId': '${scriptSessionId}190',
        'httpSessionId': '5531d06316b34b9486a6891710115ebc',
        'c0-scriptName': 'CourseBean',
        'c0-methodName': 'getLessonUnitLearnVo',
        'c0-id': '0',
        'c0-param0': 'number:' + single_content[0],  # second-level catalogue id
        'c0-param1': 'number:' + single_content[1],  # file vs. video discriminator
        'c0-param2': 'number:0',
        'c0-param3': 'number:' + single_content[2],  # concrete resource id
        'batchId': '1492168138043'
    }
    sources = requests.post(
        SOURCE_RESOURCE_URL, headers=HEADER, data=post_data).text
    # Video branch.
    if single_content[1] == '1':
        try:
            # Pick the regex matching the requested quality level.
            if args[0] == 'a':
                download_pattern_compile = re.compile(
                    r'mp4SdUrl="(.*?\.mp4).*?"')
            elif args[0] == "b":
                download_pattern_compile = re.compile(
                    r'mp4HdUrl="(.*?\.mp4).*?"')
            else:
                download_pattern_compile = re.compile(
                    r'mp4ShdUrl="(.*?\.mp4).*?"')
            video_down_url = re.search(download_pattern_compile,
                                       sources).group(1)
        except AttributeError:
            # print('------------------------')
            # print(name + '没有该清晰度格式,降级处理')
            # print('------------------------')
            # download_pattern_compile = re.compile(r'mp4SdUrl="(.*?\.mp4).*?"')
            # video_down_url = re.search(download_pattern_compile,
            #                            sources).group(1)
            # The requested quality is unavailable: skip this video.
            # NOTE(review): returning here leaves conn/cusor open.
            return
        print('正在存储链接:' + name + '.mp4')
        with open('Links.txt', 'a', encoding='utf-8') as file:
            file.write('%s \n' % (video_down_url))
        url2 = video_down_url
        with open('Rename.bat', 'a', encoding='utf-8') as file:
            video_down_url = re.sub(r'/', '_', video_down_url)
            # file.write('rename "' + re.search(
            #     r'http:.*video_(.*.mp4)', video_down_url).group(1) + '" "' +
            #     name + '.mp4"' + '\n')
            file.write(name + '\n')
        # Insert the resource row first...
        cusor.execute(sql1, (course_id, 'mp4', name))
        # ...then use its auto-increment id as FK for the mp4 row.
        cusor.execute(sql3, (conn.insert_id(), url2))
        conn.commit()
    # Document branch.
    else:
        pdf_download_url = re.search(r'textOrigUrl:"(.*?)"', sources).group(1)
        print('正在下载:' + name + '.pdf')
        pdf_file = requests.get(pdf_download_url, headers=HEADER)
        if not os.path.isdir('PDFs'):
            os.mkdir(r'PDFs')
        with open('PDFLinks.txt', 'a', encoding='utf-8') as file:
            file.write('%s \n' % (pdf_download_url))
        with open('PDFName.txt', 'a', encoding='utf-8') as file:
            file.write('%s \n' % (name))
        # with open('PDFs\\' + name + '.pdf', 'wb') as file:
        #     file.write(pdf_file.content)
        # Insert the resource row first...
        cusor.execute(sql1, (course_id, 'pdf', name))
        # ...then use its auto-increment id as FK for the pdf row.
        cusor.execute(sql2, (conn.insert_id(), pdf_download_url))
        conn.commit()
    cusor.close()
    conn.close()
def select_video_level():
    """Return the video quality flag.

    Interactive selection is currently disabled; the function always
    answers 'a' (standard definition; 'b' = high, 'c' = super).
    """
    return 'a'
def check_select_course(course):
    """Confirm the user's course selection.

    The interactive confirmation prompt is currently disabled, so every
    course is auto-confirmed with 'y'.
    """
    return 'y'
def main():
    """Entry point: reset output files and normalize one course id in MySQL.

    Removes the append-mode output files left by a previous run, re-creates
    Rename.bat with a UTF-8 codepage switch, reads all course ids from the
    `mooc` table, then fetches course info for one hard-coded course and
    rewrites its id in the table.
    """
    conn = pymysql.connect(host='127.0.0.1', port=3306, db='graduate',
                           user='root', password='13452078118')
    cusor = conn.cursor()
    course = Course()
    # The link files are opened in append mode elsewhere, so remove
    # leftovers from a previous run first.
    if os.path.exists('Links.txt'):
        os.remove('Links.txt')
    if os.path.exists('PDFLinks.txt'):
        os.remove('PDFLinks.txt')
    if os.path.exists('PDFName.txt'):
        os.remove('PDFName.txt')
    # Rename.bat is also append-mode: delete it, then write the codepage
    # switch (chcp 65001 = UTF-8) as its first line.
    if os.path.exists("Rename.bat"):
        os.remove("Rename.bat")
    with open('Rename.bat', 'a', encoding='utf-8') as file:
        file.writelines('chcp 65001\n')
    # Fetch all course ids.
    sql = "select course_id from mooc "
    cusor.execute(sql)
    acourseid = cusor.fetchall()
    bcourseid = []
    # Flatten the tuple-of-tuples result set into a plain list of ids.
    for a in acourseid:
        bcourseid.append(a[0])
    # print(bcourseid)
    # for i in courseid:
    #     print(i)
    conn.commit()
    # cusor.close()
    # conn.close()
    # while True:
    #     course.set_course(input("\n请输入课程id(例如SICNU-1002031014)"))
    # Normalize the stored course id to the value reported by the site.
    sql4 = 'update mooc set course_id=%s where course_id=%s'
    # for i in bcourseid:
    #     try:
    #         course.set_course(i)
    #         course.get_course_info()
    #         # if check_select_course(course) == 'y':
    #         #     break
    #         # update the mooc course_id
    #         print(i)
    #         print(course.course_id)
    #         cusor.execute(sql4,[course.course_id,i])
    #         conn.commit()
    #         # fetch the resources
    #         # get_course_all_source(course.course_id)
    #     except BaseException:
    #         continue
    course_id = 'SWJTU - 1001911007'
    course.set_course(course_id)
    course.get_course_info()
    cusor.execute(sql4, [course.course_id, course_id])
    conn.commit()
    # print(bcourseid[0])
    # get_course_all_source(bcourseid[0])
    cusor.close()
    conn.close()
# Script entry point.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import sys
from ont_fast5_api.multi_fast5 import MultiFast5File
from ont_fast5_api.fast5_info import _clean
__author__ = 'Huanle.Liu@crg.eu'
__version__ = '0.2'
__email__ = 'same as author'
# Usage text printed when the script is invoked incorrectly.
usage = '''
python fast5_type.py fast5file
return:
0: single read fast5
1: multi-reads fast5
'''
# Require exactly one command-line argument: the fast5 file path.
if len (sys.argv) !=2:
    print (usage, file=sys.stderr)
    sys.exit()
def check_file_type(f5_file):
try:
return 1 if _clean(f5_file.handle.attrs['file_type']).startswith('multi') else 0
except KeyError:
if len(f5_file.handle) == 0 :
return 1
if len([read for read in f5_file.handle if read.startswith('read_')]) !=0 :
return 1
if 'UniqueGlobalKey' in f5_file.handle:
return 0
raise TypeError('file can not be indetified as single- or multi- read.\n' 'File path: {}'.format(f5_file.filename))
# Open the file with the multi-read reader (usable for both layouts) and
# report the detected type on stdout: 0 = single-read, 1 = multi-read.
filepath = sys.argv[1]
f5_file = MultiFast5File (filepath, mode='r')
filetype = check_file_type (f5_file)
#filetype = 1 if filetype.startswith ('multi') or else 0
print (filetype)
|
# -*- coding: utf-8 -*-
'''
The event processor handles incoming events and is called from server.py.
'''
# Import Tornado libs
from tornado import gen
# Import Tamarack libs
import tamarack.github
import tamarack.utils.prs
@gen.coroutine
def handle_event(event_data, token):
    '''
    Dispatch an incoming GitHub event.

    Only pull request payloads are acted on today; anything else is
    ignored.  More event types can be routed from here later.

    event_data
        Payload sent from GitHub.
    token
        GitHub user token.
    '''
    if not event_data.get('pull_request'):
        return
    yield handle_pull_request(event_data, token)
@gen.coroutine
def handle_pull_request(event_data, token):
    '''
    React to a Pull Request event based on its action type.

    Only "opened" actions are handled: the bot comments on the new PR
    with the teams/users that should potentially review the submission.
    Other actions are logged and ignored.  This dispatcher can be
    extended with more action types later.

    event_data
        Payload sent from GitHub.
    token
        GitHub user token.
    '''
    print('Received pull request event. Processing...')
    action = event_data.get('action')
    if action != 'opened':
        print('Skipping. Action is \'{0}\'. '
              'We only care about \'opened\'.'.format(action))
        return
    # Eventually we should move this to an "assign_reviewers" function,
    # but we need to wait for GitHub to expose this functionality for
    # team reviews. It will work for individual members, but not teams
    # presently. We could also loop through each team and request a
    # review from each individual member, but let's comment for now.
    yield tamarack.utils.prs.mention_reviewers(event_data, token)
|
# -*- encoding: utf-8 -*-
import os
from flask import render_template, request,jsonify, url_for
from flask_login import login_required
from jinja2 import TemplateNotFound
from app.home import blueprint
import pandas as pd
import numpy as np
from config import Config
# Application configuration singleton.
config = Config()
from pathlib import PurePosixPath
# Pandas display options (affect console/debug output only).
pd.set_option("display.float_format", lambda x: "%.5f" % x)  # pandas
pd.set_option("display.max_columns", 100)
pd.set_option("display.max_rows", 100)
pd.set_option("display.width", 600)
# remote_path = PurePosixPath(gcs_path, local_file[1 + len(local_path):])
@blueprint.route('/index')
@login_required
def index():
    """Render the dashboard landing page (login required)."""
    context = {'segment': 'index'}
    return render_template('index.html', **context)
@blueprint.route('/overview')
def overview():
    """Render the market-overview page.

    Pulls three public Crunchbase CSV dumps from Google Cloud Storage:
    investments (aggregated into the top-15 categories by total amount
    raised), events, and news (15 most recent).

    Fixes: removed the unused ``filepath_investments`` local (the data is
    read from the remote URL, not from disk) and stopped shadowing the
    builtin ``id`` in the row comprehension.
    NOTE(review): this fetches remote data on every request — consider
    caching if the page is hit frequently.
    """
    df_investments = pd.read_csv(r'https://storage.googleapis.com/crunchbase-data-2020-09-18t12-39-18/crunchbase-investments.zip', compression='zip')
    df_investments = df_investments.dropna(subset=['raised_amount_usd'])
    df_investments['raised_amount_usd'] = pd.to_numeric(df_investments['raised_amount_usd'])
    # Top 15 categories by total raised, re-sorted alphabetically so the
    # chart axis order is stable.
    df_category = df_investments.groupby(['company_category_code'])['raised_amount_usd']\
        .sum().reset_index(name='sum_of_category')\
        .sort_values('sum_of_category', ascending=False)\
        .head(15)\
        .sort_values('company_category_code')
    data_list = [row.to_list() for _, row in df_category.iterrows()]
    df_events = pd.read_csv(r'https://storage.googleapis.com/crunchbase-data-2020-09-18t12-39-18/events.csv')
    columns = ['name', 'cb_url', 'started_on', 'city']
    # NOTE(review): head(10) runs before the sort, so this shows the first
    # 10 rows of the file sorted among themselves, not the 10 most recent
    # events overall.  Kept as-is to preserve the existing page content.
    df_events = df_events.T.reindex(columns).T.head(10)
    df_events = df_events.sort_values('started_on', ascending=False)
    df_news = pd.read_csv(r'https://storage.googleapis.com/crunchbase-data-2020-09-18t12-39-18/news.csv')
    df_news = df_news.sort_values('date', ascending=False).head(15)
    return render_template('page-new.html', segment='overview', data_list=data_list, df_events=df_events.itertuples(), df_news=df_news.itertuples())
@blueprint.route('/industry/<industry>')
@login_required
def industry(industry):
    """Render the per-industry page.

    The ``industry`` URL component is currently unused by the template;
    the page always renders with segment='industry'.

    Fix: removed the dead ``segment = get_segment(request)`` call — its
    result was never used.
    """
    return render_template('page-industry.html', segment='industry')
@blueprint.route('/<template>')
@login_required
def route_template(template):
    """Serve app/templates/<template>.html generically.

    Appends the '.html' suffix when missing, renders the template with
    the current segment, and maps failures to the 404/500 pages.
    """
    try:
        if not template.endswith('.html'):
            template += '.html'
        # Detect the current page
        segment = get_segment(request)
        # Serve the file (if exists) from app/templates/FILE.html
        return render_template(template, segment=segment)
    except TemplateNotFound:
        return render_template('page-404.html'), 404
    except Exception:
        # Fix: a bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception lets those propagate.
        return render_template('page-500.html'), 500
# Helper - Extract current page name from request
def get_segment(request):
    """Return the last path component of the request, or 'index' for '/'.

    Returns None when the request object does not expose a usable path.
    """
    try:
        segment = request.path.split('/')[-1]
        if segment == '':
            segment = 'index'
        return segment
    except Exception:
        # Fix: bare 'except:' replaced so SystemExit/KeyboardInterrupt
        # are not swallowed.
        return None
|
#!/usr/bin/python3
from elftools.elf.elffile import ELFFile
from elftools.elf.relocation import RelocationSection
from elftools.elf.sections import StringTableSection
from elftools.elf.sections import SymbolTableSection
from elftools.elf.sections import *
import pefile
from pinja.color.color import *
def get_pe_entrypoint(filepath):
    """Return the entry point RVA (AddressOfEntryPoint) of a PE file."""
    with open(filepath, 'rb') as f:
        pe = pefile.PE(f.name)
        if 0:  # debug output, disabled
            print_green("{0}".format(pe.OPTIONAL_HEADER))
            print_blue("{0}".format(pe.OPTIONAL_HEADER.AddressOfEntryPoint))
        return pe.OPTIONAL_HEADER.AddressOfEntryPoint
def get_pe_raw_entrypoint(filepath):
    """Return the raw file offset of a PE file's entry point.

    Converts the entry point RVA to a file offset via the section that
    contains it: raw = rva - VirtualAddress + PointerToRawData.
    Raises Exception when no section covers the entry point.
    """
    with open(filepath, 'rb') as f:
        pe = pefile.PE(f.name)
        entrypoint = pe.OPTIONAL_HEADER.AddressOfEntryPoint
        # Find the section whose virtual range covers the entry point RVA.
        try:
            section = next(
                s for s in pe.sections
                if 0 <= entrypoint - s.VirtualAddress <= s.SizeOfRawData)
        except StopIteration:
            raise Exception('No section contains entrypoint.')
        entrypoint_raw = (entrypoint
                          - section.VirtualAddress
                          + section.PointerToRawData)
        # print debug
        if 0:
            print_green("{0}".format(pe.OPTIONAL_HEADER))
            print_green("{0}".format(section))
            print_blue("{0}".format(entrypoint_raw))
        return entrypoint_raw
def get_elf_entrypoint(filepath):
    """Return the ELF entry point address (e_entry from the ELF header)."""
    debug = 0
    with open(filepath, 'rb') as f:
        elf = ELFFile(f)
        if debug:  # optional debug output
            print_green("{0}".format(elf.header))
            print_blue("{0}".format(elf.header.e_entry))
        return elf.header.e_entry
def get_pe_ALLsymbol_address(filepath):
    """Print PE section and import information (debug helper).

    PE files carry no full symbol table comparable to ELF's .symtab, so
    this always returns the placeholder [[0, 0, 0]] — callers can then
    treat PE and ELF files uniformly.

    Fix: in the debug2 branch, format() was closed after its first
    argument while the string had two placeholders, so enabling debug2
    raised IndexError; the call now matches its sibling above.
    """
    debug1 = 0
    debug2 = 0
    debug3 = 0
    allsymbol = []
    with open(filepath, 'rb') as f:
        pe = pefile.PE(f.name)
        if debug1:
            print_green("{0}".format(pe.OPTIONAL_HEADER))
            print_blue("{0}".format(pe.OPTIONAL_HEADER.AddressOfEntryPoint))
        # print section name and some address
        for section in pe.sections:
            print_yelow("{}, {}, {}, {}".format(section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), hex(section.SizeOfRawData) ))
        # If the PE file was loaded using the fast_load=True argument, we will need to parse the data directories:
        #pe.parse_data_directories()
        for entry in pe.DIRECTORY_ENTRY_IMPORT:
            print_blue("{}".format(entry.dll))
            for imp in entry.imports:
                print_purple("{}, {}".format(hex(imp.address), imp.name))
        if debug2:
            for exp in pe.DIRECTORY_ENTRY_EXPORT:
                print_green("{}, {}, {}".format(hex(pe.OPTIONAL_HEADER.ImageBase + exp.address), exp.name, exp.ordinal))
            for entry in pe.DIRECTORY_ENTRY_IMPORT:
                print_yelow(entry.dll)
                for imp in entry.imports:
                    print_blue("{}, {}".format(hex(imp.address), imp.name))
            for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
                # Bug fix: format() now receives all three arguments.
                print_green("{}, {}, {}".format(pe.OPTIONAL_HEADER.ImageBase + exp.address, exp.name, exp.ordinal))
            print_blue("{}".format(pe.dump_info()))
        if debug3:
            for section in pe.sections:
                print_green("{}, {}, {}, {}, {}".format(
                    section.Name,
                    hex(section.VirtualAddress),
                    hex(section.Misc_VirtualSize),
                    section.SizeOfRawData,
                    section.get_entropy()))
                if section.Name == '.text':
                    # NOTE(review): pefile exposes section.Name as padded
                    # bytes, so this str comparison likely never matches —
                    # confirm before relying on this branch.
                    print_red("{}, {}".format((section.PointerToRawData),hex(section.Misc_VirtualSize)))
    allsymbol.append([0, 0, 0])
    return allsymbol
def get_elf_ALLsymbol_address(filepath):
    """Collect [name, address, size] for every sized symbol in an ELF file.

    Reads the .symtab section (and optionally .dynsym — currently disabled
    via the hard-coded dynsym flag); symbols with st_size == 0 are skipped.
    Returns [[0, 0, 0]] as a placeholder when there is no symbol table.
    """
    debug = 0
    allsymbol = []
    with open(filepath, 'rb') as f:
        elf = ELFFile(f)
        symtab = elf.get_section_by_name('.symtab')
        # store the symbol information into .text section(.sym is not ) of the file
        if debug:
            print_green(".text-address({0}): {1}, ".format(f.name, elf.get_section_by_name('.text')['sh_addr']))
        if symtab is not None:
            for element in symtab.iter_symbols():
                if debug:
                    print_red("ALL: {0}, ".format(dir(element.entry)))
                    print_purple_noLF("name: {0}, ".format(element.name))
                    print_blue_noLF("st_name: {0}".format(element.entry['st_name']))
                    print_green_noLF("address: {0}, ".format(hex(element.entry['st_value'])))
                    print_red("size {0},".format(hex(element.entry['st_size'])))
                # Keep only symbols with a non-zero size (functions/objects).
                if element.entry['st_size'] != 0:
                    allsymbol.append([element.name, element.entry['st_value'], element.entry['st_size']])
        else:
            print_blue("WARNING(no symbol) : {0}".format(f.name))
            allsymbol.append([0, 0, 0])
        # dynsym handling (disabled: flag hard-coded to 0).
        dynsym = 0
        if dynsym:
            dynsym = elf.get_section_by_name('.dynsym')
            if dynsym is not None:
                for element in dynsym.iter_symbols():
                    # print debug
                    if debug:
                        print_red("ALL: {0}, ".format(dir(element.entry)))
                        print_purple_noLF("name: {0}, ".format(element.name))
                        print_blue_noLF("st_name: {0}".format(element.entry['st_name']))
                        print_green_noLF("address: {0}, ".format(hex(element.entry['st_value'])))
                        print_red("size {0},".format(hex(element.entry['st_size'])))
                    if element.entry['st_size'] != 0:
                        allsymbol.append([element.name, element.entry['st_value'], element.entry['st_size']])
            else:
                print_red("{0}: No symbol-info".format(f.name))
                allsymbol.append([0, 0, 0])
    return allsymbol
# Just Reference code, so this function do not use in main() of pinja
def get_elf_ALLsymbol_address_otherinformation(filepath):
    """Reference/demo code: iterate all ELF sections and, for string and
    symbol table sections, look up the section they link to (sh_link).
    Not used by pinja's main()."""
    debug = 0
    with open(filepath, 'rb') as f:
        elf = ELFFile(f)
        for section in elf.iter_sections():
            symbol = [hex(section['sh_addr']), section.name]
            if debug:
                print_yelow("{0}".format(symbol))
                print_purple("{0}".format(f'{section.name}'))
                print(dir(section))
            if isinstance(section, StringTableSection):
                if debug:
                    print_red("{0}".format(f'{section.name}:'))
                # sh_link points at the associated section.
                symbol_table = elf.get_section(section['sh_link'])
            if isinstance(section, SymbolTableSection):
                if debug:
                    print_red("{0}".format(f'{section.name}:'))
                symbol_table = elf.get_section(section['sh_link'])
                if debug:
                    print_red(dir(elf.get_section(section['sh_link']).name))
                    print_green(elf.get_section_by_name('.text')['sh_addr'])
                    print_green(elf.get_section_by_name('.text')['sh_name'])
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mdsclient
from plugins.contracts import ICartridgeAgentPlugin
import time
import zipfile
import subprocess
from modules.util.log import LogFactory
import os
class TomcatServerStarterPlugin(ICartridgeAgentPlugin):
    """Cartridge-agent plugin that boots Tomcat via its startup script."""

    def run_plugin(self, values):
        """Launch Tomcat through ${CATALINA_HOME}/bin/startup.sh and log it."""
        log = LogFactory().get_log(__name__)
        startup_cmd = "exec ${CATALINA_HOME}/bin/startup.sh"
        log.info("Starting Tomcat server: [command] %s" % startup_cmd)
        process = subprocess.Popen(startup_cmd, shell=True)
        stdout_data, stderr_data = process.communicate()
        log.debug("Tomcat server started: [command] %s, [output] %s" % (process.args, stdout_data))
|
#!/usr/bin/python -tt
'''
I did this to understand Erlang calculation.
It is largely based on documentation and examples from www.erlang.com
This algorithm models a time-spread load to predict number of agents (engines, processors, phone lines, call centre operators/agents, etc)
in a queuing system.
'''
import math
def ErlangCPODelayTime(Traffic, Lines, HoldTime, DelayTime):
    """Probability that a queued request waits at most DelayTime.

    Standard Erlang-C waiting-time distribution factor, capped at 1.
    """
    prob = ErlangC(Traffic, Lines) * math.exp(
        -(Lines - Traffic) * DelayTime / HoldTime)
    return 1 if prob > 1 else prob
def ErlangB(Traffic, pLines):
    """Erlang-B blocking probability for `Traffic` erlangs on `pLines` lines.

    Uses the standard inverse recurrence 1/B(n) = (n / A) * 1/B(n-1) + 1.
    Returns 0 for non-positive traffic, or when the inverse grows past
    10000 (blocking is then negligible).
    """
    if Traffic <= 0:
        return 0
    inverse_b = (1 + Traffic) / Traffic
    for n in range(2, pLines + 1):
        inverse_b = n / Traffic * inverse_b + 1
        if inverse_b > 10000:
            return 0
    return 1 / inverse_b
def ErlangC(Traffic, pLines):
    """Erlang-C probability that an arriving request must wait (capped at 1)."""
    blocking = ErlangB(Traffic, pLines)
    waiting = blocking / (1 - (Traffic / pLines) * (1 - blocking))
    return 1 if waiting > 1 else waiting
def CallDurationCheck(DurationValue):  # mean call duration
    """Clamp the mean call duration to the supported range [10, 1200] seconds."""
    return min(max(DurationValue, 10), 1200)
def PercentageCheck(PercentageValue):  # % of requests served in due time (without delay), i.e. in AnsweredIn
    """Clamp the service-level percentage to the supported range [10, 95]."""
    return min(max(PercentageValue, 10), 95)
def WrapTimeCheck(WrapTimeValue):  # time in seconds
    """Clamp the after-call wrap-up time to the supported range [0, 300] s."""
    return min(max(WrapTimeValue, 0), 300)
def CallPerHourCheck(CallsPerHourValue):  # call freq in a given hour
    """Clamp the hourly call volume to the supported range [10, 5000]."""
    return min(max(CallsPerHourValue, 10), 5000)
def CalculateHour(HourName,CallsPerHourValue,DurationValue,WrapTimeValue,AnsweredIn,PercentageValue):
    """Size agents and trunk lines for one hour of traffic.

    HourName: label of the hour (stored as-is in the result).
    CallsPerHourValue: expected calls in this hour (clamped to [10, 5000]).
    DurationValue: mean call duration, seconds (clamped to [10, 1200]).
    WrapTimeValue: after-call wrap-up time, seconds (clamped to [0, 300]).
    AnsweredIn: target answer time, seconds.
    PercentageValue: % of calls to answer within AnsweredIn (clamped to [10, 95]).
    Returns a dict with the per-hour sizing results.
    """
    CallsPerHourValue = CallPerHourCheck(CallsPerHourValue)
    # Total agent occupation per call: talk time + wrap-up.
    AgentBusyTime = CallDurationCheck(DurationValue) + WrapTimeCheck(WrapTimeValue)
    # Offered traffic in erlangs.
    ECTraffic = AgentBusyTime * CallsPerHourValue/3600
    # Start just above the offered load and add agents until the service
    # level target is met (Erlang C waiting-time probability).
    AgentCounter = math.floor(ECTraffic) + 1
    while (ErlangCPODelayTime(ECTraffic,AgentCounter,AgentBusyTime,AnsweredIn)>(100-PercentageCheck(PercentageValue))/100):
        AgentCounter+=1
    # Mean queueing delay over all calls, rounded up to a whole second.
    AverageDelayAll = math.floor( ErlangC(ECTraffic,AgentCounter) * AgentBusyTime/(AgentCounter-ECTraffic)) + 1
    # Trunk-line traffic includes the queueing delay on top of talk time.
    EBTraffic = CallsPerHourValue * (AverageDelayAll + CallDurationCheck(DurationValue) )/3600
    Lines = math.floor(CallDurationCheck(DurationValue) * CallsPerHourValue/3600)+1
    TrunkBlockingTarget = 0.01 # Trunk blocking target
    # "fraction of the total calls which will be lost because insufficient lines have been provided"
    # Add lines until the Erlang B blocking probability drops under target.
    if (EBTraffic>0):
        while (ErlangB(EBTraffic,Lines)>TrunkBlockingTarget):
            Lines+=1
    # results:
    Hour = dict()
    Hour['HourName']=HourName # as-is
    Hour['CallsPerHour']=CallsPerHourValue
    Hour['AverageDelayAll']= AverageDelayAll
    Hour['AgentBusyTime']=AgentBusyTime
    Hour['AgentCounter']=AgentCounter
    Hour['Lines']=Lines
    return Hour # dictionary
def PeakResults(HoursData):  # all-hours list
    """Aggregate per-hour sizing results into peak statistics.

    Returns a dict with:
        LinesRequired        - max lines needed over all hours
        MaximumAgents        - max agents needed over all hours
        PeakHourCallsPerHour - call volume of the busiest hour
        PeakHour             - name of the busiest hour

    Bug fix: the original never updated PeakHourCallsPerHour (it stayed 0),
    so PeakHour ended up as the *last* hour with a positive call volume
    rather than the busiest one.
    """
    Peak = dict()
    Peak['PeakHourCallsPerHour'] = 0
    Peak['LinesRequired'] = 0
    Peak['MaximumAgents'] = 0
    Peak['PeakHour'] = 0
    for Hour in HoursData:
        Peak['LinesRequired'] = max(Peak['LinesRequired'], Hour['Lines'])
        Peak['MaximumAgents'] = max(Peak['MaximumAgents'], Hour['AgentCounter'])
        if Hour['CallsPerHour'] > Peak['PeakHourCallsPerHour']:
            Peak['PeakHourCallsPerHour'] = Hour['CallsPerHour']
            Peak['PeakHour'] = Hour['HourName']
    return Peak  # dictionary
def main():
    """Interactive demo: read hourly call volumes, print sizing results."""
    print('Erlang calculation demo')
    DurationValue = 300 # seconds, mean call duration time.
    '''
    Consider this: http://megamozg.ru/company/rocketcallback/blog/14610/
    Of all calls they've analysed:
    62.25% - x<1 min
    22.875% - 1<x<3 mins
    12% - 3<x<5 mins
    06.625% - 5<x mins
    They also provide a limited industry breakdown of mean call duration.
    '''
    WrapTimeValue = 60 # seconds
    PercentageValue = 80 # % should be served in AnsweredIn
    AnsweredIn = 30 # seconds
    HoursData = []
    print("Input mean number of calls for each hour:")
    # NOTE(review): range(0, 23) covers hours 0..22 only — hour 23 is
    # skipped; range(24) was probably intended.
    for Hour in range(0, 23):
        cph = input("\t"+str(Hour)+":")
        CallsPerHourValue = int(cph)
        HoursData.append(CalculateHour(str(Hour),CallsPerHourValue,DurationValue,WrapTimeValue,AnsweredIn,PercentageValue))
    Peak = PeakResults(HoursData)
    # (typo 'Asumptions' left as-is: it is a runtime output string)
    print('Asumptions:\n')
    print(' Mean work duration is', DurationValue, "seconds")
    print(' Server recovery (wrap) time is', WrapTimeValue)
    print(' We expect that', PercentageValue, "of requests are served in",AnsweredIn, "seconds")
    print(' That would require:\n', Peak['LinesRequired'], "queues (connection lines)")
    print(' and a maximum of', Peak['MaximumAgents'], "servers (agents)")
    print(' the peak hour is', Peak['PeakHour'])
    if str(input("Print details? (y/N) "))=="y":
        for Hour in HoursData:
            print('\n\n>Hour: ',Hour['HourName'])
            print('=>Calls Per Hour ',Hour['CallsPerHour'])
            print('=>Average enqueued Delay ',Hour['AverageDelayAll'])
            print('=>Mean Agent Busy Time ',Hour['AgentBusyTime'])
            print('=>Agents Required ',Hour['AgentCounter'])
            print('=>Lines Required ',Hour['Lines'])
# Standard boilerplate: run the demo when executed as a script.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch as T
from torch.autograd import Variable as var
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
from .core import NPICore
class NPI(nn.Module):
def __init__(
self,
encoder,
input_size,
hidden_size,
num_layers=2,
kind='lstm',
dropout=0,
bidirectional=False,
num_args=10,
programs=1000,
**kwargs
):
super(NPI, self).__init__()
# networks
self.encoder = encoder
self.core_net = NPICore(input_size, hidden_size, num_layers, kind, dropout, bidirectional, **kwargs)
self.terminator_net = TerminatorNet(input_size * hidden_size)
self.argument_net = ArgumentNet(input_size * hidden_size, hidden_size, num_args)
self.key_net = KeyNet(input_size * hidden_size, hidden_size, programs)
# variables
self.memory =
|
import os
import pickle as pkl
from collections import Iterable
class LineCache(object):
    '''
    LineCache caches the byte offset of every line of a file, so random
    line access costs one seek() + readline() instead of re-reading the
    file.  The offsets are pickled to <filename><cache_suffix> and reused
    on later runs as long as the file's mtime is unchanged.  Building the
    index the first time requires one full pass over the file.

    Usage:
        from linecache_ligth import LineCache
        linecache = LineCache('a.txt', cache_suffix='.cache')
        num_lines = len(linecache)
        line_0 = linecache[0]
        line_100 = linecache[100]
    '''

    def __init__(self, filename, cache_suffix='.cache'):
        self.filename = filename
        if os.path.exists(self.filename + cache_suffix):
            self.st_mtime, self.line_seek = pkl.load(
                open(self.filename + cache_suffix, 'rb'))
            self.num_lines = len(self.line_seek)
            # Rebuild when the file changed after the cache was written.
            if self.st_mtime != os.stat(self.filename).st_mtime:
                print('The cache file is out-of-date')
                self.build_seek_index(cache_suffix)
        else:
            self.build_seek_index(cache_suffix)
        # Bug fix: the original passed os.O_RDONLY | os.O_NONBLOCK as the
        # third positional argument of open(), which is `buffering`, not
        # an OS flags word.  Plain buffered binary mode is what's wanted.
        # NOTE(review): the handle is never closed explicitly; callers
        # rely on interpreter cleanup.
        self.fhandle = open(self.filename, 'rb')

    def build_seek_index(self, cache_suffix):
        """Scan the file once, record each line's start offset, and pickle
        the (mtime, offsets) index next to the file."""
        print(
            "Caching lines information to %s" % (self.filename + cache_suffix))
        statinfo = os.stat(self.filename)
        self.st_mtime = statinfo.st_mtime
        with open(self.filename, 'rb') as f:
            self.line_seek = []
            while True:
                seek_pos = f.tell()
                line = f.readline()
                if not line:
                    break
                self.line_seek.append(seek_pos)
        pkl.dump((self.st_mtime, self.line_seek),
                 open(self.filename + cache_suffix, 'wb'))
        self.num_lines = len(self.line_seek)

    def __getitem__(self, line_no):
        """Return line `line_no` (0-based) as str; supports slices and
        iterables of indices.  Raises IndexError past the last line."""
        if isinstance(line_no, slice):
            # Bug fix: the original used the Python-2-only name `xrange`,
            # which raises NameError on Python 3.
            return [self[ii] for ii in range(*line_no.indices(len(self)))]
        elif isinstance(line_no, Iterable):
            return [self[ii] for ii in line_no]
        else:
            if line_no >= self.num_lines:
                raise IndexError("Out of index: line_no:%s num_lines: %s" % (
                    line_no, self.num_lines))
            self.fhandle.seek(self.line_seek[line_no])
            line = self.fhandle.readline()
            return line.decode("utf-8")

    def __len__(self):
        """Number of lines in the indexed file."""
        return self.num_lines
|
#!/usr/bin/env python
from __future__ import absolute_import
__author__ = 'maxim'
from .ensemble import Ensemble, predict_multiple
from .model_io import get_model_info, ModelNotAvailable
|
import unittest
import numpy as np
from utils.utils import *
class TestUtils(unittest.TestCase):
    """Unit tests for utils.flatten and utils.selection on random data."""

    def setUp(self):
        # Build `number_points` random 2-D arrays (random row counts, fixed
        # column dim) keyed by index, plus per-point integer labels in
        # [0, number_classes).  The last point is given a constant
        # out-of-range label (== number_classes) so that selection() can be
        # verified to filter it out.
        self.number_points = 10
        self.number_classes = 3
        self.dim = 10
        self.data = {j: np.random.rand(np.random.randint(10, 100), self.dim) for j in range(self.number_points)}
        self.labels = {j: np.random.randint(self.number_classes, size=len(self.data[j])) for j in range(self.number_points - 1)}
        self.labels.update({(self.number_points - 1): [self.number_classes] * len(self.data[self.number_points - 1])}) # Add a constant point with not same label

    def test_flatten(self):
        # Flattening the dict-of-arrays must concatenate all rows and keep
        # labels aligned with rows.
        flatdata, flatlabels = flatten(self.data, self.labels)
        self.assertEqual(flatdata.shape[0], np.sum([len(self.data[d]) for d in self.data]))
        self.assertEqual(flatdata.shape[1], self.dim)
        self.assertEqual(len(flatlabels), len(flatdata))
        # Test on flatten data: flatten() must be idempotent.
        flatdata, flatlabels = flatten(flatdata, flatlabels)
        self.assertEqual(flatdata.shape[0], np.sum([len(self.data[d]) for d in self.data]))
        self.assertEqual(flatdata.shape[1], self.dim)
        self.assertEqual(len(flatlabels), len(flatdata))

    def test_selection(self):
        # selection() must drop points whose labels are not in the given
        # mapping/list; the constant out-of-range label must disappear.
        selecteddata, selectedlabels = selection(self.data, self.labels, {"+": 1, "-": 0})
        flatdata, flatlabels = flatten(selecteddata, selectedlabels)
        self.assertEqual(flatdata.shape[1], self.dim)
        self.assertEqual(len(flatlabels), len(flatdata))
        self.assertNotIn(self.number_classes - 1, flatlabels)
        # Same behaviour when the selector is a plain list of labels.
        selecteddata, selectedlabels = selection(self.data, self.labels, [0, 1])
        flatdata, flatlabels = flatten(selecteddata, selectedlabels)
        self.assertEqual(flatdata.shape[1], self.dim)
        self.assertEqual(len(flatlabels), len(flatdata))
        self.assertNotIn(self.number_classes - 1, flatlabels)
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
'''
Created by Wang Qiu Li
7/3/2018
transpose nodule data to create more data
'''
import numpy as np
import os
from PIL import Image
import csvTools
# Directory containing the nodule .npy cubes to augment.
datadir = '404026/'
# Scratch/test directory (unused in this script).
testdir = '/home/wangqiuli/Documents/test/'
def angle_transpose(file, degree, flag_string):
    '''
    Rotate every depth slice of a saved nodule cube and write the result
    to a new .npy file next to the original.

    @param file : a npy file which store all information of one cubic
    @param degree: how many degree will the image be transposed,90,180,270 are OK
    @flag_string: which tag will be added to the filename after transposed
    '''
    array = np.load(file)
    array = array.transpose(2, 1, 0)  # from x,y,z to z,y,x
    newarr = np.zeros(array.shape, dtype=np.float32)
    for depth in range(array.shape[0]):
        jpg = array[depth]
        # Fix: the original called jpg.reshape(...) here and discarded the
        # result (numpy reshape is not in-place), so the call was a no-op
        # and has been removed.
        img = Image.fromarray(jpg)
        # img.show()
        out = img.rotate(degree)
        newarr[depth, :, :] = np.array(out).reshape(array.shape[1], -1)[:, :]
    newarr = newarr.transpose(2, 1, 0)  # back to x,y,z
    print(newarr.shape)
    np.save(file.replace(".npy", flag_string + ".npy"), newarr)
# Augment every cube in datadir with three rotated copies; file names that
# fail are collected and written to errfile.txt for later review.
# NOTE(review): the tag names and angles look mismatched (90° tagged
# "_leftright", 180° "_updown", 270° "_diagonal") — confirm intent.
filelist = os.listdir(datadir)
errfile = []
for onefile in filelist:
    print(datadir + onefile)
    try:
        angle_transpose(datadir + onefile, 90, "_leftright")
        angle_transpose(datadir + onefile, 180, "_updown")
        angle_transpose(datadir + onefile, 270, "_diagonal")
    except BaseException:
        # Best-effort batch: log the failing file and keep going.
        print(onefile)
        errfile.append(onefile)
csvTools.writeTXT('errfile.txt', errfile)
import torch
from torch import nn
from transformers import BertModel, ElectraModel
from transformers.modeling_bert import BertLayer
from capreolus import ConfigOption, Dependency
from capreolus.reranker import Reranker
class PTParade_Class(nn.Module):
    """PARADE reranker network: encode each passage with BERT/ELECTRA, then
    aggregate the per-passage [CLS] vectors into a single document score.

    Only the 'transformer' aggregation strategy is implemented; 'max',
    'avg' and 'attn' raise NotImplementedError.
    """

    def __init__(self, extractor, config, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.extractor = extractor
        self.config = config
        # Select the pretrained passage encoder.
        if config["pretrained"] == "electra-base-msmarco":
            self.bert = ElectraModel.from_pretrained("Capreolus/electra-base-msmarco")
        elif config["pretrained"] == "bert-base-msmarco":
            self.bert = BertModel.from_pretrained("Capreolus/bert-base-msmarco")
        elif config["pretrained"] == "bert-base-uncased":
            self.bert = BertModel.from_pretrained("bert-base-uncased")
        else:
            raise ValueError(
                f"unsupported model: {config['pretrained']}; need to ensure correct tokenizers will be used before arbitrary hgf models are supported"
            )
        # Two extra BERT layers used by the 'transformer' aggregation.
        self.transformer_layer_1 = BertLayer(self.bert.config)
        self.transformer_layer_2 = BertLayer(self.bert.config)
        self.num_passages = extractor.config["numpassages"]
        self.maxseqlen = extractor.config["maxseqlen"]
        # Scoring head: aggregated hidden vector -> scalar relevance score.
        self.linear = nn.Linear(self.bert.config.hidden_size, 1)
        if config["aggregation"] == "max":
            raise NotImplementedError()
        elif config["aggregation"] == "avg":
            raise NotImplementedError()
        elif config["aggregation"] == "attn":
            raise NotImplementedError()
        elif config["aggregation"] == "transformer":
            self.aggregation = self.aggregate_using_transformer
            input_embeddings = self.bert.get_input_embeddings()
            # TODO hardcoded CLS token id
            cls_token_id = torch.tensor([[101]])
            # Learnable [CLS] embedding prepended to the passage [CLS]
            # sequence, initialised from BERT's own CLS token embedding.
            self.initial_cls_embedding = input_embeddings(cls_token_id).view(1, self.bert.config.hidden_size)
            # Learnable position embeddings for the (num_passages + 1) slots.
            self.full_position_embeddings = torch.zeros(
                (1, self.num_passages + 1, self.bert.config.hidden_size), requires_grad=True, dtype=torch.float
            )
            torch.nn.init.normal_(self.full_position_embeddings, mean=0.0, std=0.02)
            self.initial_cls_embedding = nn.Parameter(self.initial_cls_embedding, requires_grad=True)
            self.full_position_embeddings = nn.Parameter(self.full_position_embeddings, requires_grad=True)
        else:
            raise ValueError(f"unknown aggregation type: {self.config['aggregation']}")

    def aggregate_using_transformer(self, cls):
        """Aggregate the passage [CLS] vectors with two BERT layers and
        return the hidden state at the prepended CLS position."""
        # (batch * num_passages, hidden) -> (batch, num_passages, hidden)
        expanded_cls = cls.view(-1, self.num_passages, self.bert.config.hidden_size)
        # TODO make sure batch size here is correct
        batch_size = expanded_cls.shape[0]
        tiled_initial_cls = self.initial_cls_embedding.repeat(batch_size, 1)
        merged_cls = torch.cat((tiled_initial_cls.view(batch_size, 1, self.bert.config.hidden_size), expanded_cls), dim=1)
        merged_cls = merged_cls + self.full_position_embeddings
        (transformer_out_1,) = self.transformer_layer_1(merged_cls, None, None, None)
        (transformer_out_2,) = self.transformer_layer_2(transformer_out_1, None, None, None)
        aggregated = transformer_out_2[:, 0, :]
        return aggregated

    def forward(self, doc_input, doc_mask, doc_seg):
        """Score a batch of documents.

        doc_input/doc_mask/doc_seg carry token ids / attention mask /
        segment ids; each must contain batch * num_passages * maxseqlen
        elements (shape inferred from the view() calls below).  Returns
        (batch, 1) relevance scores.
        """
        batch_size = doc_input.shape[0]
        doc_input = doc_input.view((batch_size * self.num_passages, self.maxseqlen))
        doc_mask = doc_mask.view((batch_size * self.num_passages, self.maxseqlen))
        doc_seg = doc_seg.view((batch_size * self.num_passages, self.maxseqlen))
        # Keep only each passage's [CLS] vector from the encoder output.
        cls = self.bert(doc_input, attention_mask=doc_mask, token_type_ids=doc_seg)[0][:, 0, :]
        aggregated = self.aggregation(cls)
        return self.linear(aggregated)
@Reranker.register
class PTParade(Reranker):
    """
    PyTorch implementation of PARADE.
    PARADE: Passage Representation Aggregation for Document Reranking.
    Canjia Li, Andrew Yates, Sean MacAvaney, Ben He, and Yingfei Sun. arXiv 2020.
    https://arxiv.org/pdf/2008.09093.pdf
    """

    # registry key for this reranker module
    module_name = "ptparade"
    dependencies = [
        Dependency(key="extractor", module="extractor", name="pooledbertpassage"),
        Dependency(key="trainer", module="trainer", name="pytorch"),
    ]
    config_spec = [
        ConfigOption(
            "pretrained", "bert-base-uncased", "Pretrained model: bert-base-uncased, bert-base-msmarco, or electra-base-msmarco"
        ),
        # passage aggregation strategy passed through to PTParade_Class
        ConfigOption("aggregation", "transformer"),
    ]

    def build_model(self):
        """Lazily construct and cache the underlying PTParade_Class module."""
        if not hasattr(self, "model"):
            self.model = PTParade_Class(self.extractor, self.config)
        return self.model

    def score(self, d):
        """Return [positive score, negative score] for a pairwise training batch."""
        return [
            self.model(d["pos_bert_input"], d["pos_mask"], d["pos_seg"]).view(-1),
            self.model(d["neg_bert_input"], d["neg_mask"], d["neg_seg"]).view(-1),
        ]

    def test(self, d):
        """Score the positive document only (inference path)."""
        return self.model(d["pos_bert_input"], d["pos_mask"], d["pos_seg"]).view(-1)
|
# proxy module
from pyface.action.action import *
|
import os.path
from copy import copy
from .base import LightningLoggerBase, rank_zero_only
from test_tube import Experiment
class TestTubeLogger(LightningLoggerBase):
    """Lightning logger backed by a test_tube Experiment.

    All mutating methods are decorated with ``rank_zero_only`` so that in
    distributed training only rank 0 writes logs.
    """

    def __init__(
        self, save_dir, name="default", debug=False, version=None, create_git_tag=False
    ):
        super().__init__()
        self.experiment = Experiment(
            save_dir=save_dir,
            name=name,
            debug=debug,
            version=version,
            create_git_tag=create_git_tag,
        )

    @rank_zero_only
    def log_hyperparams(self, params):
        # test_tube consumes an argparse-style namespace
        self.experiment.argparse(params)

    @rank_zero_only
    def log_metrics(self, metrics, step_num=None):
        """Record a metrics dict at the given global step."""
        self.experiment.log(metrics, global_step=step_num)

    @rank_zero_only
    def save(self):
        """Flush buffered logs to disk."""
        self.experiment.save()

    @rank_zero_only
    def finalize(self, status):
        # flush, then release the experiment handle
        self.save()
        self.close()

    def close(self):
        self.experiment.close()

    @property
    def rank(self):
        return self.experiment.rank

    @rank.setter
    def rank(self, value):
        self.experiment.rank = value

    @property
    def version(self):
        return self.experiment.version

    # Test tube experiments are not pickleable, so we need to override a few
    # methods to get DDP working. See
    # https://docs.python.org/3/library/pickle.html#handling-stateful-objects
    # for more info.
    def __getstate__(self):
        state = self.__dict__.copy()
        # swap in a pickleable meta copy of the experiment
        state["experiment"] = self.experiment.get_meta_copy()
        return state

    def __setstate__(self, state):
        self.experiment = state["experiment"].get_non_ddp_exp()
        del state['experiment']
        self.__dict__.update(state)
|
import machine
import ssd1306
import socket
import time
import json
# --- hardware setup and shared state for the ESP8266 "watch" ---
i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4))
oled = ssd1306.SSD1306_I2C(128, 32, i2c)  # 128x32 OLED display
setalarm = [2018, 9, 26, 1, 1, 2, 0, 0]  # alarm time, same field layout as rtc.datetime()
rtc = machine.RTC()
rtc.datetime((2018, 9, 26, 1, 1, 1, 50, 1))  # seed the clock
text = ""  # last message line shown on the OLED
adc = machine.ADC(0)  # analog input used below for display brightness
point = 0  # which field the buttons edit (0-6 time fields, 7-8 alarm h/m)
xdata = []  # collected accelerometer samples awaiting upload
flag = 0  # non-zero while gesture sampling is active
send = False  # set by button C IRQ to request an upload
labels = ['c', 'o', 'l', 'u', 'm', 'b', 'i', 'a', 'null']  # gesture class names
addr = socket.getaddrinfo('0.0.0.0', 8088)[0][-1]  # HTTP server bind address
s = socket.socket()
gesture = False  # False = clock mode, True = gesture mode
def sendData(flag, content):
    """Ship the collected samples plus *content* to the backend service.

    flag selects the request label (0 = weather, 1 = twitter, anything else
    = raw gesture data).  Clears the global sample buffer afterwards and
    returns the decoded JSON reply.
    """
    global label
    global xdata
    print(content)
    label = {0: 'weather', 1: 'twitter'}.get(flag, 'null')
    payload = {
        "label": label,
        "n": 0,
        "number": len(xdata),
        "content": {
            "data": {
                "x": xdata,
                "content": content
            }
        }
    }
    ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ss.connect(('18.220.181.241', 8080))
    ss.sendall(json.dumps(payload).encode())
    reply = ss.recv(1024)
    print(type(reply), reply)
    decoded = json.loads(reply.decode())
    xdata = []
    return decoded
def do_connect():
    """Join the configured Wi-Fi network, blocking until connected."""
    import network
    wlan = network.WLAN(network.STA_IF)
    wlan.active(True)
    if not wlan.isconnected():
        print('connecting to network...')
        # alternative networks kept for convenience while developing
        # wlan.connect(b"iPhone (63)", "12345678")
        wlan.connect(b"sun", "12345678")
        # wlan.connect(b'MySpectrumWiFid3-2G', "quickacre108")
        # wlan.connect(b"sun", "12345678")
        # wlan.connect(b'Columbia University')
        while not wlan.isconnected():
            pass  # busy-wait until the association completes
    print('network config:', wlan.ifconfig())
def switchAcallback(p):
    """IRQ handler for button A in clock mode: advance the edit cursor (wraps 9 -> 0)."""
    global point
    time.sleep(0.1)  # crude debounce
    if p.value() != 1:
        return
    point = (point + 1) % 10
def switchBcallback(p):
    """IRQ handler for button B in clock mode: increment the selected field.

    Fields 7 and 8 edit the alarm hour/minute; 0-6 edit the corresponding
    RTC datetime field.  Relies on the ``displaytime`` global that the main
    loop refreshes each cycle.
    """
    time.sleep(0.1)  # crude debounce
    global displaytime
    global point
    global setalarm
    temp = list(displaytime)
    if point == 7:
        setalarm[4] += 1  # alarm hour
    elif point == 8:
        setalarm[5] += 1  # alarm minute
    else:
        temp[point] += 1
    # NOTE(review): rtc.datetime is written even when only the alarm was
    # edited (temp unchanged in that case) — confirm this is intended
    rtc.datetime(temp)
    print('set')
def switchCcallback(p):
    """IRQ handler for button C in clock mode: decrement the selected field.

    Mirror image of switchBcallback (fields 7/8 = alarm hour/minute,
    0-6 = RTC datetime fields).
    """
    time.sleep(0.1)  # crude debounce
    global displaytime
    global point
    global setalarm
    temp = list(displaytime)
    if point == 7:
        setalarm[4] -= 1  # alarm hour
    elif point == 8:
        setalarm[5] -= 1  # alarm minute
    else:
        temp[point] -= 1
    # NOTE(review): rtc is rewritten even for alarm-only edits — see switchBcallback
    rtc.datetime(temp)
def switchAgesture(p):
    """IRQ handler for button A in gesture mode: start accelerometer sampling."""
    global flag
    time.sleep(0.1)  # crude debounce
    if p.value() != 1:
        return
    flag = 1
def switchCgesture(p):
    """IRQ handler for button C in gesture mode: request an upload of samples."""
    global send
    if p.value() != 1:
        return
    send = True
do_connect()
def dp(d):
    """Decode an unsigned byte from the accelerometer as a signed reading.

    NOTE(review): a standard two's-complement decode would use d > 127 and
    d - 256; kept exactly as the original to preserve existing calibration.
    """
    return d - 255 if d > 128 else d
# Start the HTTP server socket.  bind() can fail with OSError (EADDRINUSE)
# after a soft reset while the previous socket lingers; report it and keep
# going so the rest of the watch still runs.  The previous bare "except:"
# also swallowed KeyboardInterrupt and programming errors.
try:
    s.bind(addr)
except OSError:
    print("An exception occurred")
s.listen(1)
# Short accept timeout so the display/alarm loop below keeps ticking.
s.settimeout(0.1)
def get_text(s):
    """Extract the value of the first "key=value" pair in a request string.

    Scans for the first '=' and collects characters up to the next space,
    decoding '+' as ' ' (form URL encoding).  Returns '' when *s* contains
    no '='.  The original raised UnboundLocalError for an empty string
    because ``text`` was only bound inside the loop.
    """
    text = ''
    eq = s.find('=')
    if eq != -1:
        for ch in s[eq + 1:]:
            if ch == ' ':
                break
            text += ' ' if ch == '+' else ch
    return text
# Buttons A/B/C with rising-edge IRQs; start in clock-edit mode.
switchA = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
switchB = machine.Pin(13, machine.Pin.IN, value=0)
switchC = machine.Pin(2, machine.Pin.IN, machine.Pin.PULL_UP)
switchA.irq(trigger=machine.Pin.IRQ_RISING, handler=switchAcallback)
switchB.irq(trigger=machine.Pin.IRQ_RISING, handler=switchBcallback)
switchC.irq(trigger=machine.Pin.IRQ_RISING, handler=switchCcallback)
# SPI accelerometer init.  The register writes look like an ADXL345 setup
# (0x2D = POWER_CTL, 0x31 = DATA_FORMAT) — TODO confirm against the wiring.
spi = machine.SPI(1, baudrate=2000000, polarity=1, phase=1)
cs = machine.Pin(15, machine.Pin.OUT)
cs.value(0)
spi.write(b'\x2d')
spi.write(b'\x2b')
cs.value(1)
cs.value(0)
spi.write(b'\x31')
spi.write(b'\x0f')
cs.value(1)
# Main loop: poll for HTTP commands, sample the accelerometer in gesture
# mode, and drive the OLED clock/alarm display.
while True:
    oled.fill(0)
    try:
        cl, addr = s.accept()
    except OSError:
        # accept() timed out (0.1 s) — no client this cycle
        pass
    else:
        content = cl.recv(1024).decode("utf-8")
        text = get_text(content)
        print(text)
        res = 0
        if text[0:7] == 'turn on':
            # re-initialize the display to power it back on
            oled = ssd1306.SSD1306_I2C(128, 32, i2c)
            print("i am in turn on")
            res = 1
        elif text[0:8] == 'turn off':
            oled.poweroff()
            print("i am in turn off")
            res = 2
        elif text[0: len('weather')] == 'weather':
            # fetch weather text from the backend and show it
            text = sendData(0, "")['body']
            res = 3
            print(text)
        elif text[0: len('post')] == 'post':
            print("sending twitter")
            text = sendData(1,text[len('post'):])
            text = 'post'
            res = 4
        elif text == 'switch':
            # enter gesture mode: buttons A/C start collection and upload
            gesture = True
            switchA.irq(trigger=machine.Pin.IRQ_RISING, handler=switchAgesture)
            switchC.irq(trigger=machine.Pin.IRQ_RISING, handler=switchCgesture)
        elif text == 'alarm':
            # back to clock mode: buttons edit the time/alarm again
            gesture = False
            switchA.irq(trigger=machine.Pin.IRQ_RISING, handler=switchAcallback)
            switchB.irq(trigger=machine.Pin.IRQ_RISING, handler=switchBcallback)
            switchC.irq(trigger=machine.Pin.IRQ_RISING, handler=switchCcallback)
        response = "HTTP/1.1 200 OK\n" + "Content-Type: text/html\n" + "\n" + "<html><body>Bingo</body></html>\n"
        cl.send(str.encode(response))
        cl.close()
    if gesture:
        x = 0
        y = 0
        sendstatus = "null"
        if (flag):
            # read one accelerometer sample over SPI
            cs.value(0)
            test2 = spi.read(5, 0xf3)
            cs.value(1)
            x = dp(test2[1])
            xdata.append(x)
            sendstatus = "collect" + str(len(xdata)) + ' ' + ' ' + str(x)
        if send:
            # upload the collected samples for gesture classification
            word = sendData(2,"")
            flag = 0
            send = False
            text = labels[word['body']] + " success"
    oled.text(text, 0, 20)
    displaytime = rtc.datetime()
    oled.text(
        str(displaytime[0]) + '/' + str(displaytime[1]) + '/' + str(displaytime[2]) + 'Week:' + str(displaytime[3]), 0,
        0)
    oled.text(str(displaytime[4]) + ':' + str(displaytime[5]) + ':' + str(displaytime[6]), 0, 10)
    oled.text(str(setalarm[4]) + ':' + str(setalarm[5]) + ':' + str(setalarm[6]), 64, 10)
    oled.show()
    i = adc.read()
    # ADC (10-bit) scaled to 0-255 sets the display brightness
    oled.contrast(int(i / 4))
    if displaytime[4] == setalarm[4] and displaytime[5] == setalarm[5] and displaytime[6] == setalarm[6]:
        # alarm fires: flash the whole display white
        oled.fill(1)
        oled.show()
|
"""Fluke 8508A 8.5 digit DMM"""
import testgear.base_classes as base
class F8508A(base.meter):
    """Driver for the Fluke 8508A 8.5 digit reference multimeter."""

    def init(self):
        # high-resolution readings can take minutes, so allow 180 s
        self.set_timeout(180)
        self.__guard()
        self.idstr = self.query("*IDN?").strip()
        self.write("TRG_SRCE EXT")  # no internal trigger; host triggers readings

    def get_reading(self, channel=None):
        """Trigger one measurement and return it as a float."""
        return float(self.query("X?"))  # X? is equal to *TRG;RDG?

    def select_terminal(self, terminal="FRONT"):
        """select terminal for measurement FRONT, REAR or OFF"""
        self.write("INPUT {0}".format(terminal))

    def __guard(self, guard=True):
        # internal guard is the power-on default; external guard kept for reference
        self.write("GUARD INT")
        #self.write("GUARD EXT")

    def conf_function_DCV(self, mrange=None, nplc=200, AutoZero=True, HiZ=True, channel=None):
        """configures the meter to measure DCV. if mrange=None the meter is set to Autorange"""
        if mrange is None:
            self.write("DCV AUTO")
        else:
            self.write("DCV {0:0.3f}".format(mrange))
        self.write("DCV RESL7")
        self.write("DCV FAST_OFF")
        self.write("DCV TWO_WR")
        self.write("DCV FILT_OFF")

    def conf_function_DCI(self, mrange=None, nplc=100, AutoZero=True, HiZ=True, channel=None):
        """configures the meter to measure DCI. if mrange=None the meter is set to Autorange"""
        if mrange is None:
            self.write("DCI AUTO")
        else:
            self.write("DCI {0:0.3f}".format(mrange))
        self.write("DCI RESL7")  # 7 digits is the maximum for current
        self.write("DCI FAST_OFF")
        self.write("DCI FILT_OFF")

    def conf_function_ACV(self, mrange=None, nplc=100, AutoZero=True, HiZ=True, channel=None):
        """configures the meter to measure ACV. if mrange=None the meter is set to Autorange"""
        if mrange is None:
            self.write("ACV AUTO")
        else:
            self.write("ACV {0:0.3f}".format(mrange))
        self.write("ACV RESL6")  # 6 digits is the maximum for ACV
        self.write("ACV TWO_WR")
        self.write("ACV TFER_ON")
        self.write("ACV FILT40HZ")
        #self.write("ACV DCCP") #DC coupled
        self.write("ACV ACCP")  # AC coupled

    def conf_function_ACI(self, mrange=None, nplc=100, AutoZero=True, HiZ=True, channel=None):
        """configures the meter to measure ACI. if mrange=None the meter is set to Autorange"""
        if mrange is None:
            self.write("ACI AUTO")
        else:
            self.write("ACI {0:0.3f}".format(mrange))
        self.write("ACI RESL6")  # 6 digits is the maximum for ACI
        self.write("ACI FILT40HZ")  # duplicate FILT40HZ write removed
        #self.write("ACI DCCP") #DC coupled
        self.write("ACI ACCP")  # AC coupled

    def conf_function_OHM2W(self, mrange=None, nplc=100, AutoZero=True, OffsetCompensation=False, channel=None):
        """configures the meter for 2-wire resistance. if mrange=None the meter is set to Autorange"""
        if mrange is None:
            self.write("OHMS AUTO")
        else:
            self.write("OHMS {0:0.3f}".format(mrange))
        self.write("OHMS RESL7")
        self.write("OHMS FAST_OFF")
        self.write("OHMS TWO_WR")
        self.write("OHMS LOI_OFF")
        self.write("OHMS FILT_OFF")

    def conf_function_OHM4W(self, mrange=None, nplc=200, AutoZero=True, OffsetCompensation=True, channel=1):
        """configures the meter to measure 4w resistance. if mrange=None the meter is set to Autorange"""
        # TRUE_OHMS (offset compensated) is only available up to the 20 kOhm
        # range, so fall back to plain OHMS above that or when compensation
        # is not requested.  The original compared ``mrange < 20e3`` even
        # when mrange was None (TypeError) and selected TRUE_OHMS whenever
        # OffsetCompensation was True regardless of range.
        use_true_ohms = OffsetCompensation and (mrange is None or mrange < 20e3)
        if use_true_ohms:
            if mrange is None:
                self.write("TRUE_OHMS AUTO")
            else:
                self.write("TRUE_OHMS {0:0.3f}".format(mrange))
            self.write("TRUE_OHMS RESL7")
            self.write("TRUE_OHMS FAST_OFF")
            self.write("TRUE_OHMS FOUR_WR")
            self.write("TRUE_OHMS LOI_OFF")
            self.write("TRUE_OHMS FILT_OFF")
        else:
            if mrange is None:
                self.write("OHMS AUTO")
            else:
                self.write("OHMS {0:0.3f}".format(mrange))
            self.write("OHMS RESL7")
            self.write("OHMS FAST_OFF")
            self.write("OHMS FOUR_WR")
            self.write("OHMS LOI_OFF")
            self.write("OHMS FILT_OFF")

    def conf_function_HV_OHM2W(self, mrange=None, nplc=100, AutoZero=True, OffsetCompensation=False, channel=None):
        """configures the meter for high-voltage 2-wire resistance. if mrange=None the meter is set to Autorange"""
        if mrange is None:
            self.write("HIV_OHMS AUTO")
        else:
            self.write("HIV_OHMS {0:0.3f}".format(mrange))
        self.write("HIV_OHMS RESL7")
        self.write("HIV_OHMS FAST_OFF")
        self.write("HIV_OHMS TWO_WR")
        self.write("HIV_OHMS LOI_OFF")
        self.write("HIV_OHMS FILT_OFF")

    def conf_function_HV_OHM4W(self, mrange=None, nplc=100, AutoZero=True, OffsetCompensation=False, channel=None):
        """configures the meter for high-voltage 4-wire resistance. if mrange=None the meter is set to Autorange"""
        if mrange is None:
            self.write("HIV_OHMS AUTO")
        else:
            self.write("HIV_OHMS {0:0.3f}".format(mrange))
        self.write("HIV_OHMS RESL7")
        self.write("HIV_OHMS FAST_OFF")
        self.write("HIV_OHMS FOUR_WR")
        self.write("HIV_OHMS LOI_OFF")
        self.write("HIV_OHMS FILT_OFF")
|
#!/usr/bin/env python3
"""tests for wod.py"""
import re
import os
import random
import string
from subprocess import getstatusoutput
prg = './search.py'  # script under test, expected in the working directory
# --------------------------------------------------
def test_exists():
    """The solution script exists next to the tests."""
    assert os.path.isfile(prg)
# --------------------------------------------------
def test_usage():
    """Both help flags exit 0 and print a usage message."""
    for flag in ('-h', '--help'):
        exit_code, output = getstatusoutput(f'{prg} {flag}')
        assert exit_code == 0
        assert output.lower().startswith('usage')
# --------------------------------------------------
def random_string():
    """Generate a random 5-character lowercase/digit filename."""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=5))
# --------------------------------------------------
def test_bad_file():
    """A nonexistent input file yields a nonzero exit and an error naming it."""
    missing = random_string()
    exit_code, output = getstatusoutput(f'{prg} {missing}')
    assert exit_code != 0
    assert re.search(f"No such file or directory: '{missing}'", output)
# --------------------------------------------------
def test_puzzle01():
    """forward horizontal"""
    exit_code, output = getstatusoutput(f'{prg} puzzle01.txt')
    assert exit_code == 0
    assert output.strip() == '.BC.'
# --------------------------------------------------
def test_puzzle02():
    """backward horizontal"""
    exit_code, output = getstatusoutput(f'{prg} puzzle02.txt')
    assert exit_code == 0
    assert output.strip() == 'AB.'
# --------------------------------------------------
def test_puzzle03():
    """forward vertical down"""
    exit_code, output = getstatusoutput(f'{prg} puzzle03.txt')
    assert exit_code == 0
    assert output.strip() == '.B.\n.E.\n.H.'
# --------------------------------------------------
def test_puzzle04():
    """backward vertical down"""
    exit_code, output = getstatusoutput(f'{prg} puzzle04.txt')
    assert exit_code == 0
    assert output.strip() == '..C\n..F\n..I'
# --------------------------------------------------
def test_puzzle05():
    """forward diagonal down"""
    exit_code, output = getstatusoutput(f'{prg} puzzle05.txt')
    assert exit_code == 0
    assert output.strip() == 'A..\n.E.\n..I'
# --------------------------------------------------
def test_puzzle06():
    """backward diagonal down"""
    exit_code, output = getstatusoutput(f'{prg} puzzle06.txt')
    assert exit_code == 0
    assert output.strip() == '...\nD..\n.H.'
# --------------------------------------------------
def test_puzzle07():
    """backward diagonal down (label as in the original suite)"""
    exit_code, output = getstatusoutput(f'{prg} puzzle07.txt')
    assert exit_code == 0
    assert output.strip() == '.B.\n..F\n...'
# --------------------------------------------------
def test_puzzle08():
    """forward diagonal up"""
    exit_code, output = getstatusoutput(f'{prg} puzzle08.txt')
    assert exit_code == 0
    assert output.strip() == '..C\n.E.\n...'
# --------------------------------------------------
def test_puzzle09():
    """backward diagonal up"""
    exit_code, output = getstatusoutput(f'{prg} puzzle09.txt')
    assert exit_code == 0
    assert output.strip() == '...\n..F\n.H.'
# --------------------------------------------------
def test_ice_cream():
    """ice cream puzzle: full-grid solution, all directions at once"""
    expected = """.....CHOCOLATE
.SKCARTESOOM..
.YVANILLA.N...
M.D.T..A..A..A
.A.N.IN...C..E
..P.AAG...E..T
...LNC.E..P.RN
...AE.N.R..E.E
..B..W.O.TE..E
......A.TSA..R
.......LET.I.G
.EGDUF.SN.O.L.
DAORYKCORU.C..
...TUNOCOCT...
""".rstrip()
    rv, out = getstatusoutput('{} ice_cream.txt'.format(prg))
    assert rv == 0
    assert out.strip() == expected
# --------------------------------------------------
def test_shapes():
    """shapes puzzle: full-grid solution, all directions at once"""
    expected = """...C.S.........
..T.UU..S......
..R..BR.P..M...
..I..ME.H.S....
..AENOC.EI.....
..N..HT.R......
..G.TRAPEZOID..
POLYGON.DL....N
PPENTAGON.L..O.
.Y.O..L.IE.IG..
MARGOLELLARAP..
...A...CY.TA.S.
...XM.R.CC..U.E
...E.I..O....Q.
...HC.D.......S
""".rstrip()
    rv, out = getstatusoutput('{} shapes.txt'.format(prg))
    assert rv == 0
    assert out.strip() == expected
|
# @Time : 2020/9/23
# @Author : Yushuo Chen
# @Email : chenyushuo@ruc.edu.cn
# UPDATE
# @Time : 2020/9/23
# @Author : Yushuo Chen
# @email : chenyushuo@ruc.edu.cn
"""
recbole.data.dataloader.user_dataloader
################################################
"""
from recbole.data.dataloader import AbstractDataLoader
from recbole.utils.enum_type import DataLoaderType, InputType
class UserDataLoader(AbstractDataLoader):
    """Dataloader that yields batches containing only user ids.

    Args:
        config (Config): The config of dataloader.
        dataset (Dataset): The dataset of dataloader.
        batch_size (int, optional): The batch_size of dataloader. Defaults to ``1``.
        dl_format (InputType, optional): The input type of dataloader. Defaults to
            :obj:`~recbole.utils.enum_type.InputType.POINTWISE`.
        shuffle (bool, optional): Requested shuffle flag; forced to ``True``
            by :meth:`setup` with a warning.
    """
    dl_type = DataLoaderType.ORIGIN

    def __init__(self, config, dataset,
                 batch_size=1, dl_format=InputType.POINTWISE, shuffle=False):
        self.uid_field = dataset.uid_field
        super().__init__(config=config, dataset=dataset,
                         batch_size=batch_size, dl_format=dl_format, shuffle=shuffle)

    def setup(self):
        """Force shuffling on, warning if the caller asked for no shuffle."""
        if self.shuffle is False:
            self.shuffle = True
            self.logger.warning('UserDataLoader must shuffle the data')

    @property
    def pr_end(self):
        # iteration stops after every row of user_feat has been served
        return len(self.dataset.user_feat)

    def _shuffle(self):
        reshuffled = self.dataset.user_feat.sample(frac=1).reset_index(drop=True)
        self.dataset.user_feat = reshuffled

    def _next_batch_data(self):
        start, stop = self.pr, self.pr + self.step
        batch = self.dataset.user_feat[[self.uid_field]][start:stop]
        self.pr = stop
        return self._dataframe_to_interaction(batch)
|
# For each test case read n, a, b and print 2*(180+n) - (a+b).
t = int(input())
for _ in range(t):
    n, a, b = map(int, input().split())
    print(2 * (180 + n) - (a + b))
|
# coding=utf-8
"""
@ license: Apache Licence
@ github: invoker4zoo
@ author: invoker/cc
@ wechart: whatshowlove
@ software: PyCharm
@ file: func_timer.py
@ time: $19-3-8 下午5:48
"""
import datetime
import functools

from cfnlp.tools.logger import logger
def func_timer(func):
    """Decorator that logs the wall-clock running time of *func*.

    Fixes two defects of the original wrapper: the wrapped function's
    return value is now propagated (it was silently dropped), and
    ``functools.wraps`` preserves the wrapped function's metadata.
    """
    @functools.wraps(func)
    def int_time(*args, **kwargs):
        # start time
        start_time = datetime.datetime.now()
        result = func(*args, **kwargs)
        # end time
        end_time = datetime.datetime.now()
        total_time = (end_time - start_time).total_seconds()
        logger.info('程序运行时间总计%s秒' % total_time)
        return result
    return int_time
#coding=utf-8
# Find two-digit "digit-cancelling" fractions j/i (j < i) where crossing out
# a shared digit leaves an equal fraction.  Ported from Python 2: the old
# `print res` statement and `/` integer division do not run under Python 3.
res = []
for denom in range(10, 99):
    d1, d2 = divmod(denom, 10)
    if denom % 11 == 0:  # skip repeated-digit denominators (11, 22, ...)
        continue
    for numer in range(10, denom):
        n1, n2 = divmod(numer, 10)
        # cancel the shared digit and test cross-multiplied equality
        if (n2 == d1 and d2 * numer == denom * n1) or (n1 == d2 and d1 * numer == denom * n2):
            res.append(str(numer) + '/' + str(denom))
print(res)
# Generated by Django 2.2.9 on 2020-02-01 04:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Room model.

    Rooms reference people from the ``registrations`` app as occupants.
    """

    initial = True

    dependencies = [
        ('registrations', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.IntegerField()),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('room_type', models.CharField(choices=[('office', 'Office'), ('faculty_cabin', 'Faculty Cabin'), ('classroom', 'Class Room'), ('workspace', 'Workspace'), ('other', 'Other')], max_length=100)),
                ('floor', models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3')])),
                ('description', models.TextField(blank=True, null=True)),
                ('occupants', models.ManyToManyField(blank=True, to='registrations.Person')),
            ],
        ),
    ]
|
# coding=utf-8
import object_detection2.config.config as config
from object_detection2.standard_names import *
from object_detection2.engine.defaults import default_argument_parser, get_config_file
from object_detection2.data.dataloader import *
from object_detection2.data.datasets.build import DATASETS_REGISTRY
import tensorflow as tf
import os
from object_detection_tools.predictmodel import PredictModel
import wml_utils as wmlu
import img_utils as wmli
import object_detection_tools.visualization as odv
import numpy as np
from object_detection2.data.datasets.buildin import coco_category_index
from iotoolkit.coco_toolkit import COCOData
from object_detection2.metrics.toolkit import *
slim = tf.contrib.slim  # TF1-only slim API (this script requires tensorflow 1.x)
FLAGS = tf.app.flags.FLAGS
CHECK_POINT_FILE_NAME = "data.ckpt"
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = config.get_cfg()
    gpus = args.gpus if args.gpus is not None else []
    # expose the selected GPUs (empty string disables all of them)
    os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(str(g) for g in gpus)
    print(f"Config file {args.config_file}")
    cfg.merge_from_file(get_config_file(args.config_file))
    cfg.merge_from_list(args.opts)
    cfg.log_dir = args.log_dir
    cfg.ckpt_dir = args.ckpt_dir
    return cfg
def eval_dataset():
    """Load COCO val2014 annotations/images from the user's home dir and
    return the raw item tuples."""
    data = COCOData()
    data.read_data(wmlu.home_dir("ai/mldata/coco/annotations/instances_val2014.json"),
                   image_dir=wmlu.home_dir("ai/mldata/coco/val2014"))
    return data.get_items()
def text_fn(label,probability):
    """Box-label text for drawing: category name only (probability unused)."""
    return coco_category_index[label]
def main(_):
    """Run a trained detector over COCO val2014, save drawn predictions as
    PNGs and print COCO evaluation metrics."""
    is_training = False
    args = default_argument_parser().parse_args()
    cfg = setup(args)
    data_loader = DataLoader(cfg=cfg, is_training=is_training)
    data_args = DATASETS_REGISTRY[cfg.DATASETS.TEST]
    data, num_classes = data_loader.load_data(*data_args, batch_size=1, is_training=False)
    # propagate the dataset's class count to every supported head config
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes
    cfg.MODEL.SSD.NUM_CLASSES = num_classes
    cfg.MODEL.RETINANET.NUM_CLASSES = num_classes
    cfg.MODEL.CENTERNET.NUM_CLASSES = num_classes
    cfg.MODEL.YOLACT.NUM_CLASSES = num_classes
    cfg.MODEL.FCOS.NUM_CLASSES = num_classes
    cfg.DATASETS.NUM_CLASSES = num_classes
    cfg.freeze()
    config.set_global_cfg(cfg)
    model = PredictModel(cfg=cfg,is_remove_batch=True)
    model.restoreVariables()
    save_path = args.save_data_dir
    wmlu.create_empty_dir(save_path,remove_if_exists=True)
    # 90 = COCO label space size
    metrics = COCOEvaluation(num_classes=90)
    items = eval_dataset()
    for data in items:
        full_path, shape, gt_labels, category_names, gt_boxes, binary_masks, area, is_crowd, num_annotations_skipped = data
        img = wmli.imread(full_path)
        imgs = np.expand_dims(img,axis=0)
        res = model.predictImages(imgs)
        # draw masks when the model produces them, else plain boxes
        if RD_MASKS in res:
            r_img = odv.draw_bboxes_and_mask(img,res[RD_LABELS],res[RD_PROBABILITY],res[RD_BOXES],
                                             res[RD_MASKS],
                                             show_text=True)
        else:
            r_img = odv.bboxes_draw_on_imgv2(img,res[RD_LABELS],res[RD_PROBABILITY],res[RD_BOXES],
                                             text_fn=text_fn,
                                             show_text=True)
        # feed ground truth + predictions to the evaluator
        kwargs = {}
        kwargs['gtboxes'] = gt_boxes
        kwargs['gtlabels'] =gt_labels
        kwargs['boxes'] = res[RD_BOXES]
        kwargs['labels'] = res[RD_LABELS]
        kwargs['probability'] = res[RD_PROBABILITY]
        kwargs['img_size'] = shape
        metrics(**kwargs)
        # periodic progress report
        if model.step%100 == 0:
            metrics.show()
        name = wmlu.base_name(full_path)
        img_save_path = os.path.join(save_path,name+".png")
        wmli.imwrite(img_save_path,r_img)
    metrics.show()
if __name__ == "__main__":
    tf.app.run()  # TF1 app runner: parses flags and dispatches to main(_)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 4 07:43:47 2021
@author: lenovo legion
"""
import numpy as np
#from scipy import ndimage as ndi
import cv2
def bwareaopen(im, area):
    """Remove connected components smaller than *area* pixels (in place).

    Returns the modified image for convenience.
    """
    _, labels, stats, _ = cv2.connectedComponentsWithStats(im)
    too_small = np.nonzero(stats[:, 4] < area)[0]  # column 4 == CC_STAT_AREA
    im[np.isin(labels, too_small)] = 0
    return im
# Neighbourhood lookup tables for the bwmorph-style operations below.
# dtype changed from the removed alias ``np.bool`` (AttributeError since
# NumPy 1.24) to the builtin ``bool``; contents are unchanged.
G123_LUT = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1,
                     0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                     0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0,
                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0,
                     1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
                     0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                     0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                     0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1,
                     0, 0, 0], dtype=bool)
G123P_LUT = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
                      1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0,
                      0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0,
                      1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,
                      0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0], dtype=bool)
LUT_SHRINK1 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0], dtype=bool)
LUT_SHRINK2 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=bool)
LUT_ENDPOINTS = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0], dtype=bool)
def bwmorph_thin(image):
    """Iterative LUT-based thinning (cf. Octave/Matlab ``bwmorph 'thin'`` —
    TODO confirm exact equivalence).  Returns the skeleton scaled to 0/255."""
    skel = np.array(image > 0).astype(np.uint8)
    # each 8-neighbour contributes one bit, so the correlation below yields
    # a 0..255 neighbourhood index per pixel
    mask = np.array([[8, 4, 2],
                     [16, 0, 1],
                     [32, 64, 128]], dtype=np.uint8)
    while True:
        before = np.sum(skel)  # count points before thinning
        for lut in [G123_LUT, G123P_LUT]:
            # correlate image with neighborhood mask
            #N = ndi.correlate(skel, mask, mode='constant')
            N = cv2.filter2D(skel, -1, mask, borderType=cv2.BORDER_CONSTANT)
            # take deletion decision from this subiteration's LUT
            D = np.take(lut, N)
            # perform deletion
            skel[D] = 0
        after = np.sum(skel)  # count points after thinning
        if before == after:
            # stable: no pixel was removed in a full pass
            break
    return skel*255
def bwmorph_shrink(image):
    '''see gnu octave bwmorph, shrink'''
    # uint16 because the neighbourhood index (centre weight 16 included)
    # goes up to 511
    skel = np.array(image > 0).astype(np.uint16)
    mask = np.array([[256, 32, 4],
                     [128, 16, 2],
                     [64, 8, 1]], dtype=np.uint16)
    while True:
        x = skel.copy()  # pre-iteration image for the AND below
        before = np.sum(skel)  # count points before thinning
        for lut in [LUT_SHRINK1, LUT_SHRINK2]:
            # correlate image with neighborhood mask
            #N = ndi.correlate(skel, mask, mode='constant')
            N = cv2.filter2D(skel, -1, mask, borderType=cv2.BORDER_CONSTANT)
            # take deletion decision from this subiteration's LUT
            D = np.take(lut, N)
            skel = D.astype(np.uint16)
            # a pixel survives only if it was set before AND the LUT keeps it
            skel = x & skel
        after = np.sum(skel)  # count points after thinning
        if before == after:
            break
    return (255*skel).astype(np.uint8)
def bwmorph_endpoints(image):
    """Detect skeleton end points via the 512-entry neighbourhood LUT.

    NOTE(review): unlike shrink, the result is not ANDed with the input
    between subiterations — verify this matches the intended behaviour.
    """
    skel = np.array(image > 0).astype(np.uint16)
    # each neighbour (and the centre, weight 16) contributes one bit
    mask = np.array([[256, 32, 4],
                     [128, 16, 2],
                     [64, 8, 1]], dtype=np.uint16)
    while True:
        before = np.sum(skel)  # count points before thinning
        for lut in [LUT_ENDPOINTS]:
            # correlate image with neighborhood mask
            #N = ndi.correlate(skel, mask, mode='constant')
            N = cv2.filter2D(skel, -1, mask, borderType=cv2.BORDER_CONSTANT)
            # take deletion decision from this subiteration's LUT
            D = np.take(lut, N)
            skel = D.astype(np.uint16)
        after = np.sum(skel)  # count points after thinning
        if before == after:
            break
    return (255*skel).astype(np.uint8)
def imreconstruct(marker, mask):
    """Morphological reconstruction of *marker* under *mask* by iterative
    geodesic dilation with a 3x3 structuring element."""
    se = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    while True:
        prev = marker.copy()
        marker = cv2.min(cv2.dilate(marker, se), mask)
        # converged once a dilation step changes nothing
        if cv2.countNonZero(cv2.subtract(marker, prev)) == 0:
            return marker
import torch.nn as nn
from transformers.activations import get_activation
import copy
class ElectraClassificationHeadCustom(nn.Module):
    """Head for sentence-level classification tasks.

    Wraps an existing Electra classification head: reuses its dropout, dense
    and output-projection modules, and inserts an extra (deep-copied) dropout
    between the activation and the output projection.
    """

    def __init__(self, other):
        super().__init__()
        # Share the original head's submodules ...
        self.dropout1 = other.dropout
        self.dense = other.dense
        # ... except the second dropout, which gets its own copy.
        self.dropout2 = copy.deepcopy(other.dropout)
        self.out_proj = other.out_proj

    def forward(self, features, **kwargs):
        # take <s> token (equiv. to [CLS])
        hidden = features[:, 0, :]
        hidden = self.dense(self.dropout1(hidden))
        # although BERT uses tanh here, it seems Electra authors used gelu here
        hidden = get_activation("gelu")(hidden)
        return self.out_proj(self.dropout2(hidden))
|
'''
Created on 15 May 2014
@author: glf12
'''
from distutils.core import setup
# Package metadata for the philips_dcm distribution.
# NOTE(review): distutils is deprecated since Python 3.10 (removed in 3.12);
# consider migrating to setuptools. The 'dicom' requirement refers to the
# package imported by the code — presumably the pre-pydicom package; verify.
setup(name='philips_dcm',
      version='1.0',
      description='A python interface for Philips MR multiframe dicom files',
      author='Gianlorenzo Fagiolo',
      url='https://github.com/gianlo/PhilipsMRdicom',
      license='License :: OSI Approved :: MIT License',
      requires=['numpy', 'dicom'],
      packages=['philips_dcm'])
|
import functools
import re
import sys
from ast import literal_eval
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, List, Optional, Type, TypeVar, Union, cast
from typeguard import check_type
from hesiod.cfg.cfghandler import CFG_T, RUN_NAME_KEY, ConfigHandler
from hesiod.ui import TUI
T = TypeVar("T")  # generic return type used by hcfg()
FUNCTION_T = Callable[..., Any]  # type of functions wrapped by hmain
# Global configuration; populated by hmain before the wrapped function runs.
_CFG: CFG_T = {}
RUN_FILE_NAME = "run.yaml"  # name of the config file saved in each run dir
OUT_DIR_KEY = "***hesiod_out_dir***"  # cfg key holding the run's output dir
RUN_NAME_STRATEGY_DATE = "date"  # strategy: name runs with the current date/time
RUN_NAME_DATE_FORMAT = "%Y-%m-%d-%H-%M-%S"  # format used by the "date" strategy
def _parse_args(args: List[str]) -> None:
    """Parse the given args and add them to the global config.

    Each arg is expected with the format "{prefix}{key}{sep}{value}".
    {prefix} is optional and can be any amount of the char "-".
    {key} is a string but cannot contain the chars "-", "=" and ":".
    {sep} is mandatory and can be one of "=", ":".
    {value} is a string that can contain everything.

    A dotted key ("a.b.c") addresses nested config dictionaries.

    Args:
        args: The list of args to be parsed.

    Raises:
        ValueError: If one of the given args is in a not supported format.
    """
    # Hoisted out of the loop (it was re-built for every arg); the leading
    # "-*" already consumes any dash prefix, so the old manual stripping
    # loop was dead code and has been removed.
    pattern = r"^-*(?P<key>[^-=:]+)[=:]{1}(?P<value>.+)$"
    for arg in args:
        match = re.match(pattern, arg)
        if match is None:
            raise ValueError(f"One of the arg is in a not supported format {arg}.")
        key = match.group("key")
        value = match.group("value")
        # Interpret the value as a python literal when possible,
        # otherwise keep it as a plain string.
        try:
            value = literal_eval(value)
        except (ValueError, SyntaxError):
            pass
        # Walk (creating as needed) the nested dictionaries addressed by the
        # dotted key, then set the value on the innermost one. The previous
        # version also pre-created cfg[last_key] = {} only to overwrite it
        # immediately — that dead branch is gone.
        key_splits = key.split(".")
        cfg = _CFG
        for subkey in key_splits[:-1]:
            cfg = cfg.setdefault(subkey, {})
        cfg[key_splits[-1]] = value
def _get_cfg(
    base_cfg_path: Path,
    template_cfg_path: Optional[Path],
    run_cfg_path: Optional[Path],
) -> CFG_T:
    """Load the config, either from a run file or from a template file.

    Args:
        base_cfg_path: The path to the directory with all the config files.
        template_cfg_path: The path to the template config file for this run.
        run_cfg_path: The path to the config file created by the user for this run.

    Raises:
        ValueError: If both template_cfg_path and run_cfg_path are None.

    Returns:
        The loaded config.
    """
    # A run file takes precedence: load it directly, no user interaction.
    if run_cfg_path is not None:
        return ConfigHandler.load_cfg(run_cfg_path, base_cfg_path)
    # Otherwise fall back to the template + TUI flow.
    if template_cfg_path is not None:
        template_cfg = ConfigHandler.load_cfg(template_cfg_path, base_cfg_path)
        return TUI(template_cfg, base_cfg_path).show()
    raise ValueError("Either a valid run file or a template file must be passed to hesiod.")
def _get_default_run_name(strategy: str) -> str:
    """Build a default run name following the given strategy.

    Args:
        strategy: The strategy to use to create the run name.

    Returns:
        The created run name ("" when the strategy is not recognized).
    """
    if strategy == RUN_NAME_STRATEGY_DATE:
        # Name the run after the current local date/time.
        return datetime.now().strftime(RUN_NAME_DATE_FORMAT)
    return ""
def _create_out_dir_and_save_run_file(
    cfg: CFG_T,
    out_dir_root: str,
    run_cfg_path: Optional[Path],
) -> None:
    """Create the output directory for the current run and save the run file.

    A new directory named after the run is created under `out_dir_root` and
    the run file is written inside it — unless the user's run file already
    lives exactly there (re-running a saved run).

    Args:
        cfg: The loaded config.
        out_dir_root: The root for output directories.
        run_cfg_path: The path to the config file created by the user for this run.

    Raises:
        ValueError: If the run name is not specified in the given config.
    """
    run_name = cfg.get(RUN_NAME_KEY, "")
    if run_name == "":
        raise ValueError(
            f"The config must contain a valid name for the run (key={RUN_NAME_KEY})."
        )

    run_dir = Path(out_dir_root) / Path(run_name)
    run_file = run_dir / RUN_FILE_NAME

    # Skip creation when the given run file is already the one we would write.
    already_in_place = (
        run_cfg_path is not None and run_file.absolute() == run_cfg_path.absolute()
    )
    if not already_in_place:
        run_dir.mkdir(parents=True, exist_ok=False)
        cfg[OUT_DIR_KEY] = str(run_dir.absolute())
        ConfigHandler.save_cfg(cfg, run_file)
def hmain(
    base_cfg_dir: Union[str, Path],
    template_cfg_file: Optional[Union[str, Path]] = None,
    run_cfg_file: Optional[Union[str, Path]] = None,
    create_out_dir: bool = True,
    out_dir_root: str = "logs",
    run_name_strategy: Optional[str] = RUN_NAME_STRATEGY_DATE,
    parse_cmd_line: bool = True,
) -> Callable[[FUNCTION_T], FUNCTION_T]:
    """Hesiod decorator for a given function (typically the main).

    ``hmain`` should be used with only one between ``run_cfg_file`` and ``template_cfg_file``.
    If ``run_cfg_file`` is passed, Hesiod will just load the given run file; otherwise,
    if ``template_cfg_file`` is passed, Hesiod will create a Text-based User Interface (TUI)
    to ask the user to fill/select the values in the given template config.

    The ``hmain`` decorator loads the configuration with the right parser (either using the TUI
    or not) and runs the decorated function.

    Before giving the control back to the decorated function, Hesiod creates a directory named as
    the run inside ``out_dir_root`` and saves the loaded config in a single file in it. This can be
    disabled with the argument ``create_out_dir``. The default value for ``out_dir_root`` is ``logs``.

    If the run has no name (either because it is not provided in the run file or it is not inserted
    by the user in the TUI), Hesiod will try to name it according to the ``run_name_strategy``, if
    given. ``run_name_strategy`` default is "date", meaning that runs will be named with the date
    and time formatted as "YYYY-MM-DD-hh-mm-ss".

    By default, Hesiod parses command line arguments to add/override config values. This can be
    disabled with the argument ``parse_cmd_line``.

    Args:
        base_cfg_dir: The path to the directory with all the base config files.
        template_cfg_file: The path to the template config file (optional).
        run_cfg_file: The path to the run config file created by the user
            for this run (optional).
        create_out_dir: A flag that indicates whether hesiod should create
            an output directory for the run or not (default: True).
        out_dir_root: The root for output directories (default: "logs").
        run_name_strategy: The strategy to assign a default run name if this is
            not specified by user (available options: "date", default: "date").
        parse_cmd_line: A flag that indicates whether hesiod should parse args
            from the command line or not (default: True).

    Raises:
        ValueError: If both template_cfg_file and run_cfg_file are None.
        ValueError: If hesiod is asked to parse the command line and one
            of the args is in a not supported format.
        ValueError: If the run name is not specified in the run file
            and no default strategy is specified.

    Returns:
        The given function wrapped in hesiod decorator.
    """

    def decorator(fn: FUNCTION_T) -> FUNCTION_T:
        @functools.wraps(fn)
        def decorated_fn(*args: Any, **kwargs: Any) -> Any:
            # Populate the module-level config before running fn, so that
            # hcfg()/get_out_dir()/get_run_name() work inside it.
            global _CFG
            bcfg_path = Path(base_cfg_dir)
            run_cfg_path = Path(run_cfg_file) if run_cfg_file else None
            template_cfg_path = Path(template_cfg_file) if template_cfg_file else None
            _CFG = _get_cfg(bcfg_path, template_cfg_path, run_cfg_path)

            # Command-line args override/extend the loaded config.
            if parse_cmd_line and len(sys.argv) > 1:
                _parse_args(sys.argv[1:])

            run_name = _CFG.get(RUN_NAME_KEY, "")
            if run_name == "" and run_name_strategy is not None:
                # No name given: fall back to the configured naming strategy.
                run_name = _get_default_run_name(run_name_strategy)
                _CFG[RUN_NAME_KEY] = run_name
            if run_name == "":
                msg = (
                    f"A valid name must be provided for the run. Provide one "
                    f"by setting a value for the key {RUN_NAME_KEY} or "
                    f'selecting a default strategy (e.g. "date")'
                )
                raise ValueError(msg)

            if create_out_dir:
                _create_out_dir_and_save_run_file(_CFG, out_dir_root, run_cfg_path)

            return fn(*args, **kwargs)

        return decorated_fn

    return decorator
def hcfg(name: str, t: Optional[Type[T]] = None) -> T:
    """Get the requested parameter from the global configuration.

    The ``name`` argument identifies the parameter and may be a dotted path
    (``key.subkey.subsubkey``) into nested config dictionaries. When ``t``
    is given, the value is checked against that type before being returned.

    Args:
        name: The name of the required parameter.
        t: The expected type of the required parameter (optional).

    Raises:
        TypeError: If ``t`` is not None and the requested parameter is not of the expected type.

    Returns:
        The requested parameter (a deep copy, so callers cannot mutate the config).
    """
    # Walk the nested dictionaries following the dotted path.
    node: Any = _CFG
    for part in name.split("."):
        node = node[part]

    if t is not None:
        check_type(name, node, t)

    return cast(T, deepcopy(node))
def get_cfg_copy() -> CFG_T:
    """Return a deep copy of the global configuration.

    Returns:
        A copy of the global configuration (safe for callers to mutate).
    """
    snapshot = deepcopy(_CFG)
    return snapshot
def get_out_dir() -> Path:
    """Get the path to the output directory for the current run.

    Returns:
        The path to the output directory.
    """
    return Path(deepcopy(_CFG[OUT_DIR_KEY]))
def get_run_name() -> str:
    """Get the name of the current run.

    Raises:
        ValueError: If the current run has no name — most likely because
            ``get_run_name()`` was called before a function wrapped in
            ``hmain`` ran and populated the config.

    Returns:
        The name of the current run.
    """
    name = _CFG.get(RUN_NAME_KEY, "")
    if name == "":
        raise ValueError("Something went wrong: current run has no name.")
    return name
|
import sqlalchemy as sqla
import json #only used for user import feature, please remove once that's done
from datetime import datetime, timedelta
import config
# Shared metadata object collecting every table definition created below.
meta = sqla.MetaData()


class AlreadyAdded(Exception):
    """Raised when inserting a record that already exists for the same day."""
    pass
class MyDatabase:
    """Wrapper around sqlalchemy, to bundle objects together.

    Owns the engine, every table definition and one shared connection.
    Timestamps are unix epoch seconds; a "day" is the 86400-second window
    [ts - ts % 86400, ts - ts % 86400 + 86400).
    """

    def __init__(self, filename):
        # check_same_thread=False so the sqlite connection can be shared
        # across threads.
        self.engine = sqla.create_engine('sqlite:///'+filename, echo=True, connect_args={"check_same_thread": False})
        # autoload=true in the table definition might save me from defining them every time
        # source: https://campus.datacamp.com/courses/introduction-to-relational-databases-in-python/basics-of-relational-databases?ex=9
        self.help_users = sqla.Table('help_users', meta,
                                     sqla.Column('id', sqla.Integer, primary_key=True),
                                     sqla.Column('user_id', sqla.Integer),
                                     )
        self.users = sqla.Table('users', meta,
                                sqla.Column('id', sqla.Integer, primary_key=True),
                                sqla.Column('user_id', sqla.Integer),
                                sqla.Column('is_admin', sqla.Integer),
                                sqla.Column('name', sqla.String),
                                sqla.Column('workweek_start', sqla.Integer),
                                )
        self.ponche = sqla.Table('ponche', meta,
                                 sqla.Column('id', sqla.Integer, primary_key=True),
                                 sqla.Column('user_id', sqla.Integer),
                                 sqla.Column('timestamp', sqla.Integer),
                                 )
        self.breaks = sqla.Table('breaks', meta,
                                 sqla.Column('id', sqla.Integer, primary_key=True),
                                 sqla.Column('user_id', sqla.Integer),
                                 sqla.Column('start_time', sqla.Integer),
                                 sqla.Column('expected_length', sqla.Integer),
                                 sqla.Column('end_time', sqla.Integer),
                                 sqla.Column('alarm_channel', sqla.Integer),
                                 )
        self.scheduled_breaks = sqla.Table('scheduled_breaks', meta,
                                           sqla.Column('id', sqla.Integer, primary_key=True),
                                           sqla.Column('user_id', sqla.Integer),
                                           sqla.Column('start_time', sqla.Integer),
                                           sqla.Column('expected_length', sqla.Integer),
                                           sqla.Column('weekday', sqla.Integer),
                                           )
        meta.create_all(self.engine)
        self.conn = self.engine.connect()

    def unzip_results(self, result):
        """Convert a result set into a list of {column: value} dicts.

        BUG FIX: the previous version accumulated into a single dict across
        rows, so a row could inherit columns from earlier rows whenever the
        column sets differed. Each row now gets a fresh dict.
        """
        return [dict(row.items()) for row in result]

    def add_help_user(self, user_id):
        """Insert user_id into help_users; return the inserted primary key."""
        ins = self.help_users.insert().values(user_id=user_id)
        output = self.conn.execute(ins)
        return output.inserted_primary_key

    def remove_help_user(self, user_id):
        """Delete every help_users row for user_id."""
        remove = self.help_users.delete().where(self.help_users.c.user_id == user_id)
        self.conn.execute(remove)
        # Preserve the original (odd) return value: an empty tuple.
        return ()

    def check_help_user(self, user_id):
        """Return the help_users rows matching user_id (empty list if none)."""
        fetch = self.help_users.select().where(self.help_users.c.user_id == user_id)
        return self.unzip_results(self.conn.execute(fetch))

    def get_help_users(self):
        """Return all help_users rows."""
        fetch = self.help_users.select()
        return self.unzip_results(self.conn.execute(fetch))

    def get_user(self, user_id, mention=False):  # TODO: add check for only one result back
        """Return the users rows matching user_id (`mention` is currently unused)."""
        fetch = self.users.select().where(self.users.c.user_id == user_id)
        return self.unzip_results(self.conn.execute(fetch))

    def get_all_users(self):
        """Return all users rows."""
        fetch = self.users.select()
        return self.unzip_results(self.conn.execute(fetch))

    def get_users_who_work_today(self, timestamp):
        """Return users whose days off do not include `timestamp`'s weekday.

        Accepts either a unix timestamp (int/float) or a datetime.
        """
        # isinstance replaces the old `type(x) == int` comparisons and also
        # accepts int/float subclasses.
        if isinstance(timestamp, (int, float)):
            timestamp = datetime.fromtimestamp(timestamp)
        # presumably the two days preceding workweek_start are days off —
        # TODO confirm against callers.
        check = self.users.select()\
            .where(self.users.c.workweek_start != (timestamp.weekday()+1) % 7)\
            .where(self.users.c.workweek_start != (timestamp.weekday()+2) % 7)
        return self.unzip_results(self.conn.execute(check))

    def register_user(self, input):
        """Insert a user from a JSON string of column values; return the new row."""
        input = json.loads(input)
        ins = self.users.insert().values(**input)
        output = self.conn.execute(ins)
        check = self.users.select().where(self.users.c.id == output.inserted_primary_key[0])
        return self.unzip_results(self.conn.execute(check))

    def get_scheduled_breaks(self, user_id):
        """Not implemented yet."""
        # Dead `return ()` after the raise removed; NotImplementedError is a
        # subclass of Exception, so existing `except Exception` callers still work.
        raise NotImplementedError("function not yet implemented")

    def add_ponched_user(self, user_id, timestamp):
        """Insert a ponche entry; raise AlreadyAdded if one exists for that day."""
        # set the bounds for a complete day
        start_time = timestamp - (timestamp % 86400)
        end_time = start_time + 86400
        check_duplicate = self.ponche.select()\
            .where(self.ponche.c.user_id == user_id)\
            .where(self.ponche.c.timestamp > start_time)\
            .where(self.ponche.c.timestamp < end_time)
        if self.unzip_results(self.conn.execute(check_duplicate)):
            raise AlreadyAdded("user is already added")
        ins = self.ponche.insert().values(user_id=user_id, timestamp=timestamp)
        return self.conn.execute(ins)

    def get_ponched_users(self, timestamp):
        """Return the ponche rows for the day containing `timestamp`."""
        # set the bounds for a complete day
        start_time = timestamp - (timestamp % 86400)
        end_time = start_time + 86400
        if config.verbose:
            print(str(start_time), " - ", str(end_time))
        check = self.ponche.select()\
            .where(self.ponche.c.timestamp > start_time)\
            .where(self.ponche.c.timestamp < end_time)
        return self.unzip_results(self.conn.execute(check))

    def send_user_away(self, user_id, timestamp, expected_length, alarm_channel):
        """Start a break for user_id; return the inserted break row."""
        ins = self.breaks.insert().values(user_id=user_id,
                                          start_time=timestamp,
                                          expected_length=expected_length,
                                          alarm_channel=alarm_channel
                                          )
        output = self.conn.execute(ins)
        check = self.breaks.select().where(self.breaks.c.id == output.inserted_primary_key[0])
        return self.unzip_results(self.conn.execute(check))

    def bring_user_back(self, break_id, timestamp):
        """Close break `break_id` by setting its end_time; return the row(s) found."""
        update = self.breaks.update().values(end_time=timestamp).where(self.breaks.c.id == break_id)
        output = self.conn.execute(update)
        # NOTE(review): lastrowid after an UPDATE is unreliable; selecting by
        # break_id directly would be safer — kept as-is to preserve behavior.
        check = self.breaks.select().where(self.breaks.c.id == output.lastrowid)
        return self.unzip_results(self.conn.execute(check))

    def get_breaks_from_today(self, timestamp):
        """Return the break rows started during the day containing `timestamp`."""
        # set the bounds for a complete day
        start_time = timestamp - (timestamp % 86400)
        end_time = start_time + 86400
        if config.verbose:
            print(str(start_time), " - ", str(end_time))
        check = self.breaks.select()\
            .where(self.breaks.c.start_time > start_time)\
            .where(self.breaks.c.start_time < end_time)
        return self.unzip_results(self.conn.execute(check))

    def get_current_breaks(self):
        """Return the breaks with no end_time yet (users currently away)."""
        # `== None` is intentional: sqlalchemy translates it into IS NULL.
        check = self.breaks.select().where(self.breaks.c.end_time == None)
        return self.unzip_results(self.conn.execute(check))
# Module-level singleton database instance shared by importers of this module.
db = MyDatabase('beta.db')
class MapSum(object):
    """Map from string keys to ints supporting prefix sums (LeetCode 677).

    insert(key, val) overwrites any previous value stored under key;
    sum(prefix) returns the total of all values whose key starts with prefix.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # key -> latest value inserted for that key
        self.d = {}

    def insert(self, key, val):
        """
        :type key: str
        :type val: int
        :rtype: None
        """
        self.d[key] = val

    def sum(self, prefix):
        """
        :type prefix: str
        :rtype: int
        """
        # str.startswith is the clear, direct form of the original
        # `prefix in key[:len(prefix)]` slice-and-search idiom (they are
        # equivalent: the slice is never longer than the prefix).
        return sum(val for key, val in self.d.items() if key.startswith(prefix))


# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
|
# Generated by Django 2.2.4 on 2021-11-12 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adding the ImplementationStrategy model."""

    # Must be applied after the registered-function tags migration.
    dependencies = [
        ('function_tools', '0002_registeredfunction_tags'),
    ]

    operations = [
        migrations.CreateModel(
            name='ImplementationStrategy',
            fields=[
                # Human-readable description of the strategy value.
                ('title', models.TextField(verbose_name='расшифровка значения')),
                # Natural primary key; indexed for lookups.
                ('key', models.CharField(db_index=True, max_length=512, primary_key=True, serialize=False, verbose_name='ключ')),
            ],
            options={
                'verbose_name': 'Стратегия создания функции',
                'db_table': 'function_tools_implementation_strategy',
            },
        ),
    ]
|
import csv
import os
import sys
import yaml
class ReadConfig:
    """Reader for csv and yaml configuration files."""

    def __init__(self, path):
        # Warn (but do not fail) when the path does not point to a file;
        # the path is stored regardless so later reads raise naturally.
        try:
            if not os.path.isfile(path):
                raise OSError(2, "No such file or directory", path)
        except OSError as err:
            print(err)
        self.config_path = path

    def csv_config_read(self):
        """Read a two-column csv file into a {first_col: second_col} dict."""
        with open(self.config_path) as handle:
            return {row[0]: row[1] for row in csv.reader(handle)}

    def yaml_config_read(self):
        """Parse the config file as yaml; print a message and return None on error."""
        try:
            with open(self.config_path) as handle:
                return yaml.safe_load(handle)
        except Exception as exc:
            print(exc)
            print("Please check your yaml file")

    @staticmethod
    def separate_rcParams(read_file):
        """Pull every section's "rcParams" sub-dict out into one merged dict.

        Mutates `read_file` in place (removing the "rcParams" entries) and
        returns the config together with the merged rcParams dict.
        """
        merged = {}
        for section in read_file.keys():
            if "rcParams" in read_file[section]:
                merged.update(read_file[section]["rcParams"])
                read_file[section].pop("rcParams")
        return read_file, merged
if __name__ == "__main__":
    # Resolve the repository root (parent of this file's directory).
    root_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
    # BUG FIX: this used the undefined name `ROOT_PATH` (NameError at
    # runtime); the variable defined above is `root_path`.
    config_path = os.path.join(root_path, "config", "config_template.yml")
    config = ReadConfig(config_path)
    read_yaml = config.yaml_config_read()
    read_yaml, rcParams_dict = ReadConfig.separate_rcParams(read_yaml)
    print(f"read_yaml: {read_yaml}")
    print(f"rcParams_dict: {rcParams_dict}")
|
from .calendar import ModelTestModelCalendarViewSet
from .chart import ModelTestChartViewSet
from .model_test import ModelTestModelViewSet, ModelTestRepresentationViewSet
from .pandas import MyPandasView
from .related_model_test import RelatedModelTestModelViewSet, RelatedModelTestRepresentationViewSet
|
import os
import re
import sys
import glob
import json
import shutil
import mlflow
import logging
import subprocess
import hydra
from hydra import utils
from omegaconf import DictConfig, OmegaConf
from mlflow import log_metric, log_param, log_artifacts, log_artifact
from utils import log_params_from_omegaconf_dict, infer_gpu
# Directory holding the shell/python training scripts next to this file.
SCRIPT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'scripts')

#################### Capture Metrics from Std Output ####################
# Regexes matched against each stdout line of the training subprocess:
p1 = r'.*Step (\d+): start running validation on (val|test) split...'  # validation start: (step, split)
p2 = r'.*score: (0\.\d+|[1-9]\d*\.\d+)'  # validation score line
p3 = r'.*Step (\d+): loss=(0\.\d+|[1-9]\d*\.\d+)'  # training loss line
p4 = r'.*Step (\d+): original loss=(0\.\d+|[1-9]\d*\.\d+) mix loss=(0\.\d+|[1-9]\d*\.\d+)'  # dual-loss line
# Cross-line parser state: step/split captured from the most recent p1 match,
# used to attribute a later score (p2) line.
step = 0
split = 'val'
def maybe_log_metrics(textline):
    """Parse one line of training output and log any metric found to mlflow.

    Relies on the module-level ``step``/``split`` state: a validation-start
    line (p1) records where we are, so that a later bare score line (p2)
    can be attributed to the right step and split.
    """
    if isinstance(textline, bytes):
        # Subprocess pipes yield bytes; normalise to str before matching.
        textline = textline.decode('utf8')
    global step
    global split
    res1 = re.match(p1, textline)
    res2 = re.match(p2, textline)
    res3 = re.match(p3, textline)
    res4 = re.match(p4, textline)
    if res1:
        # Remember step/split; the score itself arrives on a later line.
        step = int(res1[1])
        split = res1[2]
    if res2:
        mlflow.log_metric(f'score-{split}', float(res2[1]), step=step)
    if res3:
        mlflow.log_metric(f'train_loss', float(res3[2]), step=int(res3[1]))
    if res4:
        mlflow.log_metric(f'original_loss', float(res4[2]), step=int(res4[1]))
        mlflow.log_metric(f'mix_loss', float(res4[3]), step=int(res4[1]))
#################### Main ####################
logger = logging.getLogger(__name__)


@hydra.main(config_path="configs", config_name="config")
def main(cfg):
    """Launch one training run as a subprocess, tracking it with mlflow.

    Saves configs/code/command as mlflow artifacts, then streams the
    subprocess output through maybe_log_metrics().
    """
    print(OmegaConf.to_yaml(cfg, resolve=True))
    mlflow_cfg = cfg.mlflow
    exp_cfg = cfg.configs
    scripts_cfg = cfg.scripts
    mlflow.set_tracking_uri('file://' + mlflow_cfg.output_dir + '/mlruns')
    mlflow.set_experiment(mlflow_cfg.exp_name)
    with mlflow.start_run() as run:
        run_id = run.info.run_id
        save_dir = run.info.artifact_uri.replace('file://', '')
        # real output_dir
        exp_cfg.output_dir = exp_cfg.output_dir or os.path.join(mlflow_cfg.output_dir, 'exp_results')
        exp_cfg.output_dir = os.path.join(exp_cfg.output_dir, mlflow_cfg.task+'-'+run_id)
        # save configs
        log_params_from_omegaconf_dict(exp_cfg)
        config_file = os.path.join(save_dir, 'config.json')
        json.dump(OmegaConf.to_container(exp_cfg, resolve=True), open(config_file, 'w'))
        # Pick GPU(s) and render the training command from its shell template.
        device = infer_gpu(require_n=mlflow_cfg.num_gpus, no_wait=True)
        train_cmd = open(scripts_cfg.train_sh).read()
        train_cmd = train_cmd.format(device=device, train_py=scripts_cfg.train_py, config_file=config_file)
        # save all code files
        for path in mlflow_cfg.code_dirs:
            if os.path.isdir(path):
                log_artifacts(path, artifact_path=f"code/{os.path.basename(path)}")
            elif os.path.isfile(path):
                log_artifact(path, artifact_path="code")
        # save cmds
        cmd_file = os.path.join(save_dir, 'train_cmd.sh')
        with open(cmd_file, 'w') as fout:
            fout.write(train_cmd)
        # run cmd
        if mlflow_cfg.debug:
            # Debug mode: run inline and print the cleanup commands instead
            # of actually deleting the outputs.
            p = subprocess.run(train_cmd, shell=True)
            print(f'rm -rf {exp_cfg.output_dir}')
            print(f'rm -rf {os.path.dirname(save_dir)}')
            # shutil.rmtree(exp_cfg.output_dir)
            # shutil.rmtree(os.path.dirname(save_dir))
        else:
            # Stream subprocess output: scrape metrics and tee to a log file.
            log_file = os.path.join(save_dir, 'log.txt')
            log_output = open(log_file, 'wb')
            p = subprocess.Popen(train_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            while p.poll() is None:
                line = p.stdout.readline()
                maybe_log_metrics(line)  # capture metrics
                if mlflow_cfg.verbose:
                    print(line.decode('utf8'), end='')
                log_output.write(line)
            if p.returncode == 0:
                log_output.close()
                # for json_file in glob.glob(os.path.join(cfg.output_dir, '*.json')):
                #     log_artifact(json_file, artifact_path="results")
                logger.info('Training success')


if __name__ == "__main__":
    main()
from flask import Flask, render_template, request
#from webui import WebUI
from flask_session import Session
from api_key import API_KEY
from datetime import date
from alpha_vantage.timeseries import TimeSeries
#import numpy as np
from math import trunc
#import json
app = Flask(__name__)
#ui = WebUI(app)
# Server-side sessions that expire when the browser closes.
app.config["SESSION_PERMANENT"] = False
Session(app)
# Alpha Vantage client configured to return pandas DataFrames.
ts = TimeSeries(key=API_KEY, output_format="pandas")
def split_dataframe(df, chunk_size):
    """
    splits a pandas dataframe into a list of sub-dataframes by size of chunk_size

    The dataframe is reversed first (newest row first). A sliding window of
    `chunk_size` rows is taken at every start offset; the final, shorter
    remainder window ends the list. An empty dataframe yields [].
    """
    df = df.iloc[::-1]  # reverse the dataframe
    chunks = []
    for start in range(len(df)):
        window = df[start:start + chunk_size]
        chunks.append(window)
        # BUG FIX: the original compared the lengths with `is`, which only
        # happens to work for small ints interned by CPython; `!=`/`==`
        # compares values. A short window means we hit the tail: stop.
        if len(window) != chunk_size:
            break
    return chunks
def truncate(number, digits) -> float:
    """Truncate `number` toward zero, keeping `digits` decimal places."""
    scale = 10.0 ** digits
    return trunc(number * scale) / scale
@app.route("/", methods=["GET", "POST"])
def index():
    """Serve the swing-calculator form (GET) or compute the largest
    closing-price swing for a ticker between two dates (POST)."""
    if request.method == "GET":
        try:
            return render_template("index.html", highest_price="-", lowest_price="-", swing="-", swing_percent="-", date_range="-", date_high="-", date_low="-")
        except Exception:
            return render_template("error.html", error="Error getting index.html")
    if request.method == "POST":
        # get the form values
        symbol = request.form.get('symbol')
        startdate = request.form.get('startdate').split('-')
        enddate = request.form.get('dt').split('-')
        swingduration = request.form.get('swingduration')
        # checks for empty form data
        # BUG FIX: these checks used `is ''` identity comparisons, which rely
        # on CPython string interning; `==` compares values correctly.
        if (symbol == '') or (swingduration == ''):
            return render_template("error.html", error=f"Got empty form data.")
        for x in range(3):
            if (startdate[x] == '') or (enddate[x] == ''):
                return render_template("error.html", error=f"Got empty date data.")
        try:
            # convert start/end dates into integers
            startdate = [int(part) for part in startdate]
            enddate = [int(part) for part in enddate]
            # start and end date converted into readable format for dataframe truncation
            strt = date(startdate[0], startdate[1], startdate[2])
            end = date(enddate[0], enddate[1], enddate[2])
        except Exception:
            return render_template("error.html", error=f"Error parsing dates... Invalid date format?")
        try:
            # load all of the stock data into the pandas dataframe
            data, meta_data = ts.get_daily_adjusted(symbol=symbol, outputsize="full")
        except ValueError:
            return render_template("error.html", error=f"No data for ticker: '{symbol}'. Either the symbol doesn't exist, or the API does not support it.")
        try:
            # truncate the dataframe to hold only the data between start and end date
            data = data.truncate(before=strt, after=end)
            # slice the dataframe into chunks of the request size (given in days) after reversing the order
            sliced_data = split_dataframe(data, int(swingduration))
        except Exception:
            return render_template("error.html", error="error parsing API data into dataframe. This is a bug.")
        # find the high/lows of those chunks
        try:
            # Track the window with the biggest close-price spread.
            largest_swing = -1
            largest_swing_df = None
            for df in sliced_data:
                current_swing = df['4. close'].max() - df['4. close'].min()
                if current_swing > largest_swing:
                    largest_swing = current_swing
                    largest_swing_df = df
            highest_price = largest_swing_df['4. close'].max()
            lowest_price = largest_swing_df['4. close'].min()
            swing = truncate(largest_swing, 2)
            swing_percent = str.join('', f'{truncate(((1-(lowest_price/highest_price))*100), 2)}%')
            date_range = f"{sliced_data[0].index[0].date()} to {sliced_data[-1].index[-1].date()}"
            swing_date_high = largest_swing_df.idxmax()['4. close'].date()
            swing_date_low = largest_swing_df.idxmin()['4. close'].date()
        except Exception as e:
            return render_template("error.html", error=f"Error calculating swing data. This is a bug.{e}")
        return render_template("index.html", highest_price=highest_price, lowest_price=lowest_price, swing=swing, swing_percent=swing_percent, date_range=date_range, date_high=swing_date_high, date_low=swing_date_low)
if __name__ == "__main__":
    # Local development server; debug must be disabled for production.
    app.run(debug=True)
|
from fltk import Fl
import torch
from DartDeep.sh_v2.ppo_v2 import PPO
from PyCommon.modules.GUI import hpSimpleViewer as hsv
from PyCommon.modules.Renderer import ysRenderer as yr
import numpy as np
import pydart2 as pydart
def main():
    """Visualize a PPO walking policy (or just its reference motion) in a
    dart-based viewer window."""
    MOTION_ONLY = False  # True: play back the reference motion only, no simulation
    pydart.init()
    env_name = 'walk'
    # Single-environment PPO wrapper, used here purely for visualization.
    ppo = PPO(env_name, 1, visualize_only=True)
    if not MOTION_ONLY:
        ppo.LoadModel('model/' + env_name + '.pt')
    ppo.env.Resets(False)
    ppo.env.ref_skel.set_positions(ppo.env.ref_motion.get_q(ppo.env.phase_frame))

    # viewer settings
    # Single-element lists so the renderers keep live references we mutate.
    rd_contact_positions = [None]
    rd_contact_forces = [None]
    dart_world = ppo.env.world
    viewer = hsv.hpSimpleViewer(rect=(0, 0, 1200, 800), viewForceWnd=False)
    viewer.doc.addRenderer('MotionModel', yr.DartRenderer(ppo.env.ref_world, (150,150,255), yr.POLYGON_FILL))
    if not MOTION_ONLY:
        viewer.doc.addRenderer('controlModel', yr.DartRenderer(dart_world, (255,240,255), yr.POLYGON_FILL))
        viewer.doc.addRenderer('contact', yr.VectorsRenderer(rd_contact_forces, rd_contact_positions, (255,0,0)))

    def postCallback(frame):
        # Motion-only mode: just pose the reference skeleton at this frame.
        ppo.env.ref_skel.set_positions(ppo.env.ref_motion.get_q(frame))

    def simulateCallback(frame):
        # Query the policy (distribution mean, no sampling) and step the env.
        state = ppo.env.GetState(0)
        action_dist, _ = ppo.model(torch.tensor(state.reshape(1, -1)).float())
        action = action_dist.loc.detach().numpy()
        res = ppo.env.Steps(action)
        # res = ppo.env.Steps(np.zeros_like(action))
        # print(frame, ppo.env.ref_skel.current_frame, ppo.env.world.time()*ppo.env.ref_motion.fps)
        # print(frame, res[0][0])
        # if res[0][0] > 0.46:
        #     ppo.env.continue_from_now_by_phase(0.2)
        if res[2]:
            # Episode ended: restart the environment.
            print(frame, 'Done')
            ppo.env.reset()

        # contact rendering
        contacts = ppo.env.world.collision_result.contacts
        del rd_contact_forces[:]
        del rd_contact_positions[:]
        for contact in contacts:
            rd_contact_forces.append(contact.f/1000.)
            rd_contact_positions.append(contact.p)

    if MOTION_ONLY:
        viewer.setPostFrameCallback_Always(postCallback)
        viewer.setMaxFrame(len(ppo.env.ref_motion)-1)
    else:
        viewer.setSimulateCallback(simulateCallback)
        viewer.setMaxFrame(3000)
    viewer.startTimer(1./30.)  # ~30 fps playback
    viewer.show()
    Fl.run()


if __name__ == '__main__':
    main()
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.config.base import BaseConfigurationData
from programy.storage.stores.sql.config import SQLStorageConfiguration
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.logger.config import LoggerStorageConfiguration
from programy.storage.stores.nosql.mongo.config import MongoStorageConfiguration
from programy.storage.stores.nosql.redis.config import RedisStorageConfiguration
from programy.storage.factory import StorageFactory
from programy.utils.substitutions.substitues import Substitutions
class StorageConfiguration(BaseConfigurationData):
    def __init__(self):
        BaseConfigurationData.__init__(self, name="storage")
        # entity name -> name of the store that persists it
        self._entity_store = {}
        # store name -> its storage configuration object
        self._store_configs = {}

    @property
    def entity_store(self):
        # Mapping of entity names to store names.
        return self._entity_store

    @property
    def storage_configurations(self):
        # Mapping of store names to storage configuration objects.
        return self._store_configs
def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):
storage = configuration_file.get_section(self._section_name, configuration)
if storage is not None:
entities = configuration_file.get_section("entities", storage)
entity_types = configuration_file.get_child_section_keys("entities", storage)
for entity in entity_types:
entity_config = configuration_file.get_section(entity, entities)
self._entity_store[entity] = entity_config
stores = configuration_file.get_section("stores", storage)
store_names = configuration_file.get_child_section_keys("stores", storage)
for store in store_names:
store_config = configuration_file.get_section(store, stores)
keys = configuration_file.get_keys(store_config)
if 'type' not in keys:
YLogger.error(None, "'type' section missing from client config stores element [%s], "
"ignoring config", store)
continue
if 'config' not in keys:
YLogger.error(None, "'config' section missing from client config stores element [%s], "
"ignoring config", store)
continue
storage_type = configuration_file.get_option(store_config, 'type', subs=subs)
if storage_type == 'sql':
config = SQLStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
self._store_configs[store] = config
elif storage_type == 'mongo':
config = MongoStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
self._store_configs[store] = config
elif storage_type == 'redis':
config = RedisStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
self._store_configs[store] = config
elif storage_type == 'file':
config = FileStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
self._store_configs[store] = config
elif storage_type == 'logger':
config = LoggerStorageConfiguration()
config.load_config_section(configuration_file, store_config, bot_root, subs=subs)
self._store_configs[store] = config
else:
YLogger.error(self, "Unknown storage configuration type [%s]", storage_type)
else:
YLogger.warning(self, "'storage' section missing from client config, using to defaults")
self._entity_store = {}
StorageConfiguration.add_default_entities(self._entity_store)
self._store_configs = {}
StorageConfiguration.add_default_stores(self._store_configs)
def create_storage_config(self, file=True, sqlite=False, mongo=False, redis=False, logger=False):
    """Build and return a default storage-configuration dict.

    The result has two keys: 'entities' (entity name -> store name) and
    'stores' (store name -> configuration object), populated with the
    defaults for whichever backends are enabled by the keyword flags.
    """
    entities = {}
    stores = {}
    StorageConfiguration.add_default_entities(entities, file=file, sqlite=sqlite)
    StorageConfiguration.add_default_stores(stores, file=file, sqlite=sqlite, mongo=mongo, redis=redis, logger=logger)
    return {'entities': entities, 'stores': stores}
@staticmethod
def add_default_stores(store_configs, file=True, sqlite=False, mongo=False, redis=False, logger=False):
    """Install a default configuration object for each enabled store backend.

    Backends are added in the same order the flags were historically
    checked: sqlite, mongo, redis, file, logger.
    """
    backends = (
        (sqlite, 'sqlite', SQLStorageConfiguration),
        (mongo, 'mongo', MongoStorageConfiguration),
        (redis, 'redis', RedisStorageConfiguration),
        (file, 'file', FileStorageConfiguration),
        (logger, 'logger', LoggerStorageConfiguration),
    )
    for enabled, store_name, config_class in backends:
        if enabled is True:
            store_configs[store_name] = config_class()
@staticmethod
def add_default_stores_as_yaml(store_configs, file=True, sqlite=False, mongo=False, redis=False, logger=False):
    """Add a YAML-shaped default entry for each enabled store backend.

    Each entry is a dict of the form {'type': <type name>, 'config': {...}}
    where the 'config' sub-dict is filled in by the backend configuration
    class's own ``to_yaml`` with defaults.

    The original implementation repeated the same four statements for all
    five backends; this version drives them from one table, in the same
    emission order (file, sqlite, mongo, redis, logger).
    """
    # (enabled flag, yaml key, 'type' value, configuration class)
    backends = (
        (file, 'file', 'file', FileStorageConfiguration),
        (sqlite, 'sqlite', 'sql', SQLStorageConfiguration),
        (mongo, 'mongo', 'mongo', MongoStorageConfiguration),
        (redis, 'redis', 'redis', RedisStorageConfiguration),
        (logger, 'logger', 'logger', LoggerStorageConfiguration),
    )
    for enabled, key, type_name, config_class in backends:
        if enabled is True:
            store_configs[key] = {'type': type_name, 'config': {}}
            config_class().to_yaml(store_configs[key]['config'], defaults=True)
@staticmethod
def add_default_entities(entity_store, file=True, sqlite=False):
    """Populate entity_store with the default entity -> store-name mapping.

    User/account entities go to 'sqlite' when enabled; all content entities
    (grammars, maps, sets, properties, processors, ...) go to 'file'.
    Insertion order matches the original one-assignment-per-line version.
    """
    if sqlite is True:
        sqlite_entities = (
            StorageFactory.USERS,
            StorageFactory.LINKED_ACCOUNTS,
            StorageFactory.LINKS,
        )
        for entity in sqlite_entities:
            entity_store[entity] = 'sqlite'
    if file is True:
        file_entities = (
            StorageFactory.CATEGORIES,
            StorageFactory.ERRORS,
            StorageFactory.DUPLICATES,
            StorageFactory.LEARNF,
            StorageFactory.CONVERSATIONS,
            StorageFactory.MAPS,
            StorageFactory.SETS,
            StorageFactory.RDF,
            StorageFactory.DENORMAL,
            StorageFactory.NORMAL,
            StorageFactory.GENDER,
            StorageFactory.PERSON,
            StorageFactory.PERSON2,
            StorageFactory.REGEX_TEMPLATES,
            StorageFactory.PROPERTIES,
            StorageFactory.DEFAULTS,
            StorageFactory.VARIABLES,
            StorageFactory.TWITTER,
            StorageFactory.SPELLING_CORPUS,
            StorageFactory.LICENSE_KEYS,
            StorageFactory.PATTERN_NODES,
            StorageFactory.TEMPLATE_NODES,
            StorageFactory.BINARIES,
            StorageFactory.BRAINTREE,
            StorageFactory.PREPROCESSORS,
            StorageFactory.POSTPROCESSORS,
            StorageFactory.POSTQUESTIONPROCESSORS,
            StorageFactory.USERGROUPS,
            StorageFactory.TRIGGERS,
            StorageFactory.OOBS,
            StorageFactory.SERVICES,
        )
        for entity in file_entities:
            entity_store[entity] = 'file'
def to_yaml(self, data, defaults=True):
    """Serialise this storage configuration into *data* (a YAML-shaped dict).

    When ``defaults`` is True, emit the stock default entity mapping and
    store configurations; otherwise emit this instance's actual
    ``_entity_store`` and ``_store_configs`` contents.

    Fixes: the else branch re-assigned ``data['entities'] = {}`` (already
    initialised above) and assigned ``data['stores'][name] = {}`` only to
    overwrite it on the very next line; both redundancies removed.
    """
    data['entities'] = {}
    data['stores'] = {}
    if defaults is True:
        StorageConfiguration.add_default_entities(data['entities'])
        StorageConfiguration.add_default_stores_as_yaml(data['stores'])
    else:
        data['entities'].update(self._entity_store)
        data['stores'].update(self._store_configs)
|
##########################################################################
#
# Copyright (c) 2020, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from . import _Metadata
from . import _Menus
from ._CellPlugValueWidget import _CellPlugValueWidget
from ._PlugTableModel import _PlugTableModel
from ._RowsPlugValueWidget import _RowsPlugValueWidget
# Value Formatting
# ================
from ._Formatting import registerValueFormatter, formatValue
# Editing
# =======
#
# By default, `PlugValueWidget.create( cell["value"] )` is used to create
# a widget for editing cells in the spreadsheet, but custom editors may be
# provided for specific plug types.
# Registers a function to return a PlugValueWidget for editing cell
# value plugs of the specified type.
def registerValueWidget( plugType, plugValueWidgetCreator ) :

	"""Registers a function that returns a PlugValueWidget for editing
	cell value plugs of `plugType` in the spreadsheet. Delegates to
	`_CellPlugValueWidget`, which owns the registry."""

	_CellPlugValueWidget.registerValueWidget( plugType, plugValueWidgetCreator )
# Decorations
# ===========
## Registers a function to return a decoration to be shown
# alongside the formatted value. Currently the only supported
# return type is `Color3f`.
def registerDecoration( plugType, decorator ) :

	"""Registers `decorator`, a function returning a decoration to be shown
	alongside the formatted value for plugs of `plugType`. Delegates to
	`_PlugTableModel`, which owns the registry."""

	_PlugTableModel.registerDecoration( plugType, decorator )
## Returns the decoration for the specified plug.
def decoration( plug ) :

	"""Returns the decoration registered for the specified plug's type,
	as looked up by `_PlugTableModel`."""

	return _PlugTableModel.decoration( plug )
# Signals
# =======
## Signal emitted when the "add row" button is pressed. Slots may be
# connected to customise the menu that is shown. Delegates to
# `_RowsPlugValueWidget`, which owns the signal.
def addRowButtonMenuSignal() :

	return _RowsPlugValueWidget.addRowButtonMenuSignal()
## Signal emitted when the "add column" button is pressed. Slots
# may be connected to customise the menu that is shown, and are
# called with the following arguments :
#
# - `menuDefinition` : The `IECore.MenuDefinition` to be edited.
# - `widget` : The PlugValueWidget for the spreadsheet. Access
# the `RowsPlug` itself via `widget.getPlug()`.
#
# Example :
#
# ```
# def customAddColumnMenu( menuDefinition, widget ) :
#
# def addColumn( rowsPlug ) :
#
# with Gaffer.UndoScope( rowsPlug.ancestor( Gaffer.ScriptNode ) ) :
# rowsPlug.addColumn( Gaffer.StringPlug( "custom", defaultValue = "custom" ) )
#
# menuDefinition.append( "/CustomDivider", { "divider" : True } )
# menuDefinition.append( "/Custom", { "command" : functools.partial( addColumn, widget.getPlug() ) } )
#
# GafferUI.SpreadsheetUI.addColumnButtonMenuSignal().connect( customAddColumnMenu, scoped = False )
# ```
#
# > Tip : The `menuDefinition` will already contain a set of default
# > menu items. These may be removed by calling `menuDefinition.clear()`.
def addColumnButtonMenuSignal() :

	"""Returns the "add column" button menu signal documented above,
	owned by `_RowsPlugValueWidget`."""

	return _RowsPlugValueWidget.addColumnButtonMenuSignal()
|
"""Testing facility for conkit.io.PlmDCAIO"""
__author__ = "Felix Simkovic"
__date__ = "26 Oct 2016"
import os
import unittest
from conkit.core.contact import Contact
from conkit.core.contactfile import ContactFile
from conkit.core.contactmap import ContactMap
from conkit.core.sequence import Sequence
from conkit.io.plmdca import PlmDCAParser
from conkit.io.tests.helpers import ParserTestCase
class TestPlmDCAParser(ParserTestCase):
    """Round-trip tests for the plmDCA "res1,res2,score" CSV contact format."""

    def test_read_1(self):
        # Ten predicted contacts, all from residue 1, one triplet per line.
        content = """1,2,0.12212
1,3,0.14004
1,4,0.12926
1,5,0.089211
1,6,0.079976
1,7,0.078954
1,8,0.052275
1,9,0.026012
1,10,0.049844
1,11,0.045109
"""
        f_name = self.tempfile(content=content)
        with open(f_name, "r") as f_in:
            contact_file = PlmDCAParser().read(f_in)
        contact_map1 = contact_file.top_map
        # One map containing all ten contacts.
        self.assertEqual(1, len(contact_file))
        self.assertEqual(10, len(contact_map1))
        # Residue indices and raw scores must survive parsing unchanged.
        self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [c.res1_seq for c in contact_map1])
        self.assertEqual([2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [c.res2_seq for c in contact_map1])
        self.assertEqual(
            [0.12212, 0.14004, 0.12926, 0.089211, 0.079976, 0.078954, 0.052275, 0.026012, 0.049844, 0.045109],
            [c.raw_score for c in contact_map1],
        )

    def test_write_1(self):
        # Build a small contact file by hand, write it out, and check the
        # serialised plmDCA lines (res1,res2,score with 6-decimal scores).
        contact_file = ContactFile("RR")
        contact_file.target = "R9999"
        contact_file.author = "1234-5678-9000"
        contact_file.remark = ["Predictor remarks"]
        contact_file.method = ["Description of methods used", "Description of methods used"]
        contact_map = ContactMap("1")
        contact_file.add(contact_map)
        # (res1, res2, dist lower bound, dist upper bound, raw score)
        for c in [(1, 9, 0, 8, 0.7), (1, 10, 0, 8, 0.7), (2, 8, 0, 8, 0.9), (3, 12, 0, 8, 0.4)]:
            contact = Contact(c[0], c[1], c[4], distance_bound=(c[2], c[3]))
            contact_map.add(contact)
        contact_map.sequence = Sequence("1", "HLEGSIGILLKKHEIVFDGCHDFGRTYIWQMSD")
        contact_map.set_sequence_register()
        f_name = self.tempfile()
        with open(f_name, "w") as f_out:
            PlmDCAParser().write(f_out, contact_file)
        content = ["1,9,0.700000", "1,10,0.700000", "2,8,0.900000", "3,12,0.400000"]
        with open(f_name, "r") as f_in:
            output = f_in.read().splitlines()
        self.assertEqual(content, output)
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main(verbosity=2)
|
from cklib.args import get_arg_parser, ArgumentParser
from cloudkeeper_plugin_cleanup_expired import CleanupExpiredPlugin
def test_args():
    """The plugin's cleanup-expired flag must default to off."""
    parser = get_arg_parser()
    CleanupExpiredPlugin.add_args(parser)
    parser.parse_args()
    assert ArgumentParser.args.cleanup_expired is False
|
from .base import BaseController
from views import ProfileView
from routes import HOME_ROUTE
from models import Student
import utils
class ProfileController(BaseController):
    """Controller for the student profile screen: loads the student's
    record, formats it for display, and hands it to the ProfileView."""

    def __init__(self, router, payload):
        super().__init__(router, payload)
        self.__view = ProfileView(self)
        profile = self.process_get_student_profile(self.get_student_profile())
        self.__view.render(profile)

    def get_student_profile(self):
        """Queries the database for the Student profile fields (e.g.,
        first name, last name, address, phone number, etc.)

        @return {dict} Values to be displayed
        """
        student_id = self.get_payload()['id']
        fields = (
            Student.id,
            Student.username,
            Student.first_name,
            Student.last_name,
            Student.sex,
            Student.date_of_birth,
            Student.age,
            Student.address_street,
            Student.address_city,
            Student.address_state,
            Student.address_zip_code,
            Student.phone_number,
            Student.email,
        )
        return Student.select(*fields).where(Student.id == student_id).dicts()

    def process_get_student_profile(self, query):
        """Processes the results of the get_student_profile query before
        handing it off to the view.

        @param query {Student}
        @return {dict}
        """
        records = [{
            'full_name': utils.format_name(row['first_name'], row['last_name']),
            'sex': utils.get_sex(row['sex']),
            'date_of_birth': utils.to_mm_dd_yyyy(row['date_of_birth']),
            'phone_number': utils.format_phone_number(row['phone_number']),
            'age': str(row['age']),
            'id': str(row['id']),
            'address_zip_code': str(row['address_zip_code']),
            'username': row['username'],
            'address_street': row['address_street'],
            'address_city': row['address_city'],
            'address_state': row['address_state'],
            'email': row['email'],
        } for row in query]
        # First (and only expected) row; raises IndexError if the query
        # returned nothing, exactly as the original profile[0] did.
        return records[0]

    def on_choice_selection(self, choice, meta):
        """Handle the user's choice and redirect them to the appropriate view.

        @param choice {int} Number corresponding to the view in the
            ordered list menu.
        @param meta {Any} The meta value associated with the choice.
        """
        if choice != 1:
            self.dispatch(HOME_ROUTE)
        else:
            self.go_back()
|
import torch.nn as nn
import torch.nn.init as init
import math
from models.layers.expandergraphlayer import ExpanderLinear,ExpanderConv2d
__all__ = [
'VGGexpander', 'vggexpander11', 'vggexpander11_bn', 'vggexpander13', 'vggexpander13_bn', 'vggexpander16', 'vggexpander16_bn',
'vggexpander19_bn', 'vggexpander19',
]
class VGGexpander(nn.Module):
    """VGG-style CIFAR-10 network whose first classifier layer is a sparse
    expander linear layer sized by ``sparsity``."""

    def __init__(self, features, sparsity):
        super(VGGexpander, self).__init__()
        self.features = features
        expand_size = int(512 * sparsity / 200)
        self.classifier = nn.Sequential(
            ExpanderLinear(512, 512, expandSize=expand_size),
            nn.ReLU(True),
            nn.Linear(512, 10),
        )
        # He-style initialisation for every plain convolution module.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
                module.bias.data.zero_()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)
def make_layers(cfg, sparsity, batch_norm=False):
    """Build the VGG feature extractor as an ``nn.Sequential``.

    Args:
        cfg: list of channel counts and 'M' (2x2 max-pool) markers. The
            fixed initial 3->64 convolution (+BN+ReLU) is always prepended.
        sparsity: percentage controlling whether a layer becomes a sparse
            ExpanderConv2d (when int(sparsity*channels/100) is below the
            layer's input width) and its expandSize.
        batch_norm: when True, insert BatchNorm2d after every convolution.

    Returns:
        nn.Sequential feature extractor.

    Idiom fix: iterate the config values directly instead of
    ``for i in range(len(cfg))`` with repeated ``cfg[i]`` indexing.
    """
    layers = [nn.Conv2d(3, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(True)]
    in_channels = 64
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        # Sparsify only when the expander would actually be sparser than
        # the dense layer's input width.
        if int(sparsity * v / 100) < in_channels:
            conv2d = ExpanderConv2d(in_channels, v, expandSize=int(sparsity * v / 200), kernel_size=3, padding=1)
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
        if batch_norm:
            layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
        else:
            layers += [conv2d, nn.ReLU(inplace=True)]
        in_channels = v
    return nn.Sequential(*layers)
# Layer configurations: per-block output channel counts, 'M' = 2x2 max-pool.
# The fixed initial 3->64 convolution is added by make_layers itself, which
# is why configuration 'A' (VGG-11) starts directly with a pool marker.
cfg = {
    'A': ['M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
# Per-layer expander sizes. NOTE(review): make_layers derives expandSize from
# `sparsity` and never reads this table — confirm before relying on it.
expandcfg = {
    'A': ['M', 32, 'M', 32, 32, 'M', 64, 64, 'M', 64, 64, 'M'],
    'B': [16, 'M', 32, 32, 'M', 32, 32, 'M', 64, 64, 'M', 64, 64, 'M'],
    'D': [64, 'M', 64, 64, 'M', 16, 16, 16, 'M', 16, 16, 16, 'M', 16, 16, 16, 'M'],
    'E': [16, 'M', 32, 32, 'M', 32, 32, 32, 32, 'M', 64, 64, 64, 64, 'M', 64, 64, 64, 64, 'M'],
}
def vggexpander11(sparsity):
    """VGG 11-layer model (configuration "A")"""
    # Fix: propagate the requested sparsity to the feature layers instead of a
    # hard-coded 10, so the convolutions and classifier use a consistent value
    # (matching vggexpander16's behaviour).
    return VGGexpander(make_layers(cfg['A'], sparsity=sparsity), sparsity)
def vggexpander11_bn(sparsity=10):
    """VGG 11-layer model (configuration "A") with batch normalization"""
    # Fix: VGGexpander requires a sparsity argument; the original call omitted
    # it and raised TypeError on every invocation. Default 10 matches the
    # value vggexpander11 historically used, keeping zero-arg calls working.
    return VGGexpander(make_layers(cfg['A'], sparsity=sparsity, batch_norm=True), sparsity)
def vggexpander13(sparsity=10):
    """VGG 13-layer model (configuration "B")"""
    # Fix: the original passed expandcfg['B'] (a list) as make_layers'
    # `sparsity` parameter (TypeError in the arithmetic) and omitted
    # VGGexpander's required sparsity argument. Default 10 keeps zero-arg
    # calls working.
    return VGGexpander(make_layers(cfg['B'], sparsity=sparsity), sparsity)
def vggexpander13_bn(sparsity=10):
    """VGG 13-layer model (configuration "B") with batch normalization"""
    # Fix: expandcfg['B'] was being passed as the numeric `sparsity`
    # parameter, and VGGexpander's sparsity argument was missing.
    return VGGexpander(make_layers(cfg['B'], sparsity=sparsity, batch_norm=True), sparsity)
def vggexpander16(sparsity):
    """VGG 16-layer model (configuration "D")"""
    features = make_layers(cfg['D'], sparsity=sparsity)
    return VGGexpander(features, sparsity=sparsity)
def vggexpander16_bn(sparsity):
    """VGG 16-layer model (configuration "D") with batch normalization"""
    # Fix: the original passed expandcfg['D'] (a list) as make_layers'
    # numeric `sparsity` parameter, raising TypeError; pass the caller's
    # sparsity through as vggexpander16 does.
    return VGGexpander(make_layers(cfg['D'], sparsity=sparsity, batch_norm=True), sparsity=sparsity)
def vggexpander19(sparsity=10):
    """VGG 19-layer model (configuration "E")"""
    # Fix: expandcfg['E'] was being passed as the numeric `sparsity`
    # parameter, and VGGexpander's sparsity argument was missing.
    return VGGexpander(make_layers(cfg['E'], sparsity=sparsity), sparsity)
def vggexpander19_bn(sparsity=10):
    """VGG 19-layer model (configuration 'E') with batch normalization"""
    # Fix: expandcfg['E'] was being passed as the numeric `sparsity`
    # parameter, and VGGexpander's sparsity argument was missing.
    return VGGexpander(make_layers(cfg['E'], sparsity=sparsity, batch_norm=True), sparsity)
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# WRW 7 Mar 2022 - extract-csv-from-html.py
# Taken from do_buffalo.py - Extract a csv file from the raw html file so I
# don't have to ship the raw file, a 28 MByte+ file, with Birdland.
# Remember, the raw file "./Buffalo-Results-2-Mar-2022.html" is obtained by
# a search at the site with an empty search field and saving the resulting
# html file. That can be done into a folder outside this hierarchy
# ----------------------------------------------------------------------------
# WRW 18 Sept 2020 - Explore Buffalo Fake Book Index
# Each index item is 36 lines
# WRW 19 Sept 2020 - Can't assume line number of items, some variation.
# Many items for Tchaikovsky extend over several lines:
# 4 [<strong>Composer(s):</strong> <a href="results.html?composer=Tchaikovsky, Peter Ilich]
# 5 [Tchaikovsky, Peter Ilich">Tchaikovsky, Peter Ilich]
# 6 [Tchaikovsky, Peter Ilich</a>]
# 7 [<br>]
# 4 [<strong>Composer(s):</strong> <a href="results.html?composer=Tchaikovsky, Peter Ilich]
# 5 [Tchaikovsky, Peter Ilich">Tchaikovsky, Peter Ilich]
# 6 [Tchaikovsky, Peter Ilich</a> /]
# 7 [<a href="results.html?composer=Barlow, H.">Barlow, H.</a><br>]
# explore-v2.py - New approach, abandon fixed line.
# ----------------------------------------------------------------------------
import re
import os
import sys
import collections
from pathlib import Path
sys.path.append( "../../bin" )
import fb_utils
import fb_config
# ----------------------------------------------------------------------------
# 0 <h2>'57 Chevrolet</h2>
# 1
# 2
# 3
# 4 <strong>Composer(s):</strong> <a href="results.html?composer=Bowling, Roger">Bowling, Roger</a>
# 5 <br>
# 6
# 7
# 8
# 9
# 10
# 11
# 12
# 13
# 14 <strong>Fakebook:</strong> <a href="results.html?fakebook=Richard Wolfe's Legit Country Fake Book">Richard Wolfe's Legit Country Fake Book</a><br>
# 15
# 16
# 17 <strong>Page:</strong> 37<br>
# 18
# 19
# 20
# 21 Has lyrics <br>
# 22
# 23 <strong>Language:</strong>
# 24
# 25 English
# 26
# 27 <br>
# 28
# 29
# 30
# 31 <strong>Call Number:</strong> M;007;A9;Aa32
# 32
# 33 <hr>
# 34
# 35
# ----------------------------------------------------------------------------
# ---- Input/output locations ----
Src = "~/Downloads/Buffalo-Results-2-Mar-2022.html" # Saved from results for search for empty value
Buffalo_csv_file = "./buffalo-index.csv.gz"   # Destination csv shipped with Birdland
Errors = "./Errors.txt"                       # Per-run parse-error report
period = 36 # Number of lines per item
# ---- Module-level accumulators, filled in by proc_item()/parse() and
# ---- reported by do_summary() at the end of the run.
Books = collections.Counter()      # occurrences per fakebook title
Elements = collections.Counter()   # successfully parsed elements, per name
Missing = collections.Counter()    # elements whose pattern did not match
Empty = collections.Counter()      # elements that matched but were empty
omitted_names = set()              # books skipped (exclusion path)
included_names = set()             # books written to the csv
# Book titles to exclude, one per line; read eagerly at import time.
excludes = [ x.strip() for x in Path( 'Local-Exclusions.txt' ).read_text().split('\n') ]
# ----------------------------------------------------------------------------
def show_item( efd, item ):
    """Dump one raw html item to the error file, one numbered line per row,
    followed by a blank separator line."""
    stripped = [ text.strip() for text in item ]
    for lineno, text in enumerate( stripped ):
        print( f"{lineno:2d} [{text}]", file = efd )
    print( file = efd )
# ----------------------------------------------------------------------------
def parse( efd, name, item, pattern ):
    """Search the item's combined text for `pattern` and return group 1.

    Returns "-" when the pattern does not match (logged unless the element
    is the optional 'lyricist') and "--" when it matches but is empty; the
    module counters Missing / Empty / Elements are updated accordingly.

    Fix: removed the dead assignments (`ret = f'<NO {name}>'` and
    `ret = f'<EMPTY {name}>'`) that were overwritten on the next line.
    """
    sitem = " ".join( item )     # Combine all lines into one string for search
    m = re.search( pattern, sitem, re.DOTALL )
    err = False
    if not m:
        # 'lyricist' is frequently absent, so its misses are not reported.
        if name != 'lyricist':
            print( f"ERROR: no match for {name}", file=efd )
            show_item( efd, item )
        ret = "-"
        Missing[ name ] += 1
        err = True
    else:
        ret = m[1]
        if not len( ret ):
            ret = "--"
            Empty[ name ] += 1
            err = True
    ret = ret.replace( "\n", "" )     # Remove newlines, a few elements extend over multiple lines.
    ret = ret.strip()
    if not err:
        Elements[ name ] += 1
    return ret
# ----------------------------------------------------------------------------
def parse_composer( efd, item ):
    """Extract the composer element, joining in a second composer when the
    item contains a continuation (a line ending in '/')."""
    composer = parse( efd, 'composer', item, "<strong>Composer\(s\):</strong> <a.*?>(.*?)</a>" )
    # Only the composer element can carry a trailing '/' continuation, so a
    # single scan over the whitespace-stripped lines is enough to detect it.
    has_continuation = any(
        len( text ) and text[-1] == '/'
        for text in ( line.strip() for line in item )
    )
    if has_continuation:
        extra = parse( efd, 'extra_composer', item, "<strong>Composer\(s\):</strong> <a.*?>.*?</a>.*?<a.*?>(.*?)</a>" )
        composer = f'{composer} / {extra}'
    # Remove newlines, a few elements extend over multiple lines.
    composer = composer.replace( "\n", "" )
    return composer
# ----------------------------------------------------------------------------
# Process one entry in the html file, i.e. one title.
def proc_item( fb, efd, item ):
    """Process one entry in the html file (one title): parse its elements
    and add the result to the fakebook index `fb`.

    Fix: the exclusion branch called `excluded_names.add(book)`, a NameError
    — the module-level set is `omitted_names`. The branch is currently
    unreachable (`if True:`), but the fix makes the scaffold correct and
    lets the "Omitted books" report work once exclusion is re-enabled.
    """
    title = parse( efd, 'title', item, "<h2>(.*?)</h2>" )
    composer = parse_composer( efd, item )
    lyricist = parse( efd, 'lyricist', item, "<strong>Lyricist:</strong>\s*(.*?)\n" )
    book = parse( efd, 'book', item, "<strong>Fakebook:</strong> <a.*?>(.*?)</a><br>" )
    sheet = parse( efd, 'page', item, "<strong>Page:</strong>\s*(.*?)<br>" )
    Books[ book ] += 1
    # WRW 7 Mar 2022 - Deal with excludes in later step.
    # if book not in excludes:
    if True:
        item = { 'title' : title,
                 'composer': composer,
                 'lyricist': lyricist,
                 'sheet': sheet,
               }
        # NOTE(review): 'lyricist' is already in the dict above, so this
        # conditional re-assignment is a no-op; kept for fidelity.
        if lyricist != '-' and lyricist != '--':
            item[ 'lyricist' ] = lyricist
        fb.add( book, item )
        included_names.add( book )
    else:
        omitted_names.add( book )
# ----------------------------------------------------------------------------
# Print summary
def do_summary():
    """Write the Books / Elements / Missing / Empty counters to Summary.txt,
    each section sorted most-frequent first with a blank line between them."""
    def emit( fd, heading, counter ):
        # One section: heading, then "count name" lines in descending count order.
        print( heading, file=fd )
        for key in sorted( counter, key=counter.get, reverse=True ):
            print( f'{counter[ key ]:4d}', key, file=fd )
    with open( "Summary.txt", "w" ) as sum_fd:
        emit( sum_fd, "Books:", Books )
        print( file=sum_fd )
        emit( sum_fd, "Elements:", Elements )
        print( file=sum_fd )
        emit( sum_fd, "Missing elements:", Missing )
        print( file=sum_fd )
        emit( sum_fd, "Empty elements:", Empty )
# ----------------------------------------------------------------------------
# ---- Main driver: configure fb_utils, scan the saved html, emit the csv ----
conf = fb_config.Config()
conf.get_config()
conf.set_class_variables()
fb = fb_utils.FB()
fb.set_classes( conf )
fb.set_class_config()
ipath = Path( Src ).expanduser()
with open( ipath ) as ifd, open( Errors, "w" ) as efd:
    prior_line = "<No Prior>"
    lines = ifd.readlines()
    i = 0
    in_contents = False # A little state machine
    while i < len( lines ):
        line = lines[i]
        line = line.strip()
        # Items only appear between <div id="content"> and its closing </div>.
        if line == '<div id="content">': # Start at <div id="content">
            in_contents = True
        if in_contents and line == '</div>':
            in_contents = False
        if in_contents:
            if "<h2>" in line:
                # Each item is `period` (36) lines starting at its <h2> title;
                # consume the whole window in one step.
                item = lines[i:i+period]
                proc_item( fb, efd, item ) # *** Bang!
                i += period
            else:
                # Unexpected line inside the contents region: report it with
                # the previous line for context, then move on one line.
                print( f"WARNING: Line {i+1}, unexpected line: [{line}]", file=efd )
                print( f" Prior line: [{prior_line}]", file=efd )
                i += 1
        else:
            i += 1
        prior_line = line
# ------------------------------------------------------------------------
do_summary()
fb.save_csv( "Buffalo", "Buf", Buffalo_csv_file )
t = '\n '.join( sorted( included_names ))
print( f"Included books: \n {t}", file=sys.stderr, flush=True )
t = '\n '.join( omitted_names )
print( f"Omitted books: \n {t}", file=sys.stderr, flush=True )
|
import os

# Annotate a branch trace with a static per-PC bias bit.
# Pass 1 counts, for every branch PC, how often it was taken ('T') vs not
# taken; pass 2 re-reads the trace and appends ' 1' (majority taken) or
# ' 0' to each line.
#
# Fixes over the original:
#  * Both passes now use `with` blocks — the first file handle was never
#    closed before the trace was reopened.
#  * The first occurrence of each PC is actually counted. The original
#    only initialised its counters to 0/0 on first sight, silently
#    dropping one outcome per branch from the tallies.
taken = {}
ntake = {}
with open("rsync_final.trace") as src:
    for line in src:
        # Trace fields — 0: pc address, 1: T/N, 2: target address
        pc, outcome = line.split()[:2]
        if pc not in taken:
            taken[pc] = 0
            ntake[pc] = 0
        if outcome == 'T':
            taken[pc] += 1
        else:
            ntake[pc] += 1

with open("rsync_final.trace") as src, open("rsync_actual_final.trace", mode='w+') as dest:
    for line in src:
        pc = line.split()[0]
        bias = taken[pc] > ntake[pc]
        dest.write(line.rstrip('\n'))
        dest.write(' ')
        dest.write('1' if bias else '0')
        dest.write("\n")
|
import ray
from pytorch_lightning.accelerators.horovod_accelerator import \
HorovodAccelerator
try:
import horovod.torch as hvd
from horovod.ray import RayExecutor
except (ModuleNotFoundError, ImportError):
HOROVOD_AVAILABLE = False
else:
HOROVOD_AVAILABLE = True
def get_executable_cls():
    """Return the executable class passed to ``RayExecutor.start()``.

    Only used for testing purposes, currently. We need to override this in
    tests to ensure the test path is set correctly on the workers.
    """
    return None
class HorovodRayAccelerator(HorovodAccelerator):
    """Pytorch Lightning Accelerator for Horovod training on a Ray cluster.

    This accelerator is used to manage distributed training on a Ray cluster
    via the Horovod training framework. Internally, the specified number of
    Ray actors are launched in the cluster and are configured as part of the
    Horovod ring. The Pytorch Lightning trainer is instantiated on the
    driver and sent to each of these training workers where training is
    executed. The distributed training protocol is handled by Horovod.

    Each training worker is configured to reserve 1 CPU and 1 GPU if
    ``use_gpu`` is set to ``True``.

    If using this accelerator, you should run your code like a normal Python
    script: ``python train.py``, and not with ``horovodrun``.

    Args:
        num_hosts (int): The number of nodes/machines to execute the job on.
        num_slots (int): Number of workers to be placed on each machine.
        use_gpu (bool): Whether to use GPU for allocation. For GPU to be
            used, you must also set the ``gpus`` arg in your Pytorch Lightning
            Trainer to a value > 0.

    Example:

        .. code-block:: python

            import pytorch_lightning as ptl
            from ray.util.lightning_accelerators import HorovodRayAccelerator

            ptl_model = MNISTClassifier(...)
            # 2 nodes, 4 workers per node, each using 1 CPU and 1 GPU.
            accelerator = HorovodRayAccelerator(num_hosts=2, num_slots=4,
                use_gpu=True)

            # If using GPUs, set the ``gpus`` arg to a value > 0.
            # The actual number of GPUs is determined by ``num_slots``.
            trainer = pl.Trainer(..., gpus=1, accelerator=accelerator)
            trainer.fit(ptl_model)
    """

    def __init__(self,
                 *args,
                 num_hosts=1,
                 num_slots=1,
                 use_gpu=False,
                 **kwargs):
        # The trainer is attached later (in setup()), so the parent is
        # constructed with trainer=None here.
        super().__init__(*args, trainer=None, **kwargs)
        self.nickname = "horovod_ray"
        self.num_hosts = num_hosts
        self.num_slots = num_slots
        self.use_gpu = use_gpu

    def setup(self, model):
        # Create and start the pool of Ray actors that will host training.
        self.trainer.use_horovod = True
        settings = RayExecutor.create_settings(timeout_s=30)
        self.executor = RayExecutor(
            settings,
            num_hosts=self.num_hosts,
            num_slots=self.num_slots,
            use_gpu=self.use_gpu)
        self.trainer.model = model
        self.executor.start(executable_cls=get_executable_cls())

    def train(self):
        # Ship the trainer to workers through the Ray object store, and
        # detach it from ``self`` first: ``self`` is pickled for the remote
        # ``self.train_remote`` call, and leaving the trainer attached would
        # serialize it a second time.
        trainer = self.trainer
        trainer_ref = ray.put(self.trainer)
        self.trainer = None
        results = self.executor.run(self.train_remote, args=[trainer_ref])
        # Only the rank-0 worker returns a payload; the others return None.
        results, state_dict, best_path = results[0]
        self.trainer = trainer
        # Restore the trained weights (and best checkpoint path) on the
        # driver-side model.
        self.trainer.model.load_state_dict(state_dict)
        if self.trainer.checkpoint_callback:
            self.trainer.checkpoint_callback.best_model_path = best_path
        return results

    def train_remote(self, trainer_ref):
        # Runs on each Ray worker: rebuild the trainer, join the Horovod
        # ring, and execute the parent accelerator's training loop.
        self.trainer = ray.get(trainer_ref)
        hvd.init()
        if self.trainer.on_gpu:
            # Horovod assigns one local GPU per process.
            self.trainer.root_gpu = hvd.local_rank()
        # TODO: Make changes in PTL to clean this up.
        super(HorovodRayAccelerator, self).setup(self.trainer.model)
        results = super(HorovodRayAccelerator, self).train()
        if hvd.rank() != 0:
            # Only want results from the first worker.
            return None
        best_model_path = None
        if self.trainer.checkpoint_callback is not None:
            best_model_path = self.trainer.checkpoint_callback.best_model_path
        model = self.trainer.model
        return results, model.state_dict(), best_model_path

    def teardown(self):
        # Stop the Ray actors created in setup().
        self.executor.shutdown()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.