# -*- coding: utf-8 -*-
import pytest
from . import BaseTestSuit, AssertDesc, gen_test_string
TEST_USERNAME = 'user_' + gen_test_string(6)
TEST_PASSWORD = 'test_password'
class TestSuitUser(BaseTestSuit):
API_PATH_ROOT = '/api/v1/users'
def setup_class(self):
pass
def teardown_class(self):
self.do_teardown_class()
def _test_sign_in(self, username, password):
# Test sign-in
body = {
'captchaToken': '<TEST>',
'captcha' : '0000',
'signIn': {
'username': username,
'password': password,
},
}
status_code, resp = self.API.post('/api/v1/auth/do/sign-in', body=body)
assert status_code == 200, AssertDesc.bad_resp(resp)
def test_add(self):
data = {
'username': TEST_USERNAME,
'password': TEST_PASSWORD,
'name' : '测试名称',
'roles' : ['user'],
'mobile' : '18600000000',
}
self.do_test_add(data, field_check_ignore=['password'])
# Test sign-in with the new account
self._test_sign_in(data['username'], data['password'])
def test_modify(self):
data = {
'username': TEST_USERNAME + '_changed',
'password': TEST_PASSWORD + '_changed',
'name' : '测试名称(修改)',
'mobile' : '18600000000(修改)',
}
self.do_test_modify(data, field_check_ignore=['password'])
# Test sign-in with the modified credentials
self._test_sign_in(data['username'], data['password'])
def test_list(self):
self.do_test_list()
@pytest.mark.order(-1)
def test_delete(self):
self.do_test_delete()
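# Running this suite (a sketch; paths and plugin choice are assumptions): the relative
# import above means this file must sit inside a test package that provides
# BaseTestSuit, AssertDesc and gen_test_string, and pytest.mark.order(-1) needs an
# ordering plugin such as pytest-order:
#
#   pip install pytest pytest-order
#   pytest -v <path-to-the-test-package>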
|
# =========================================================================
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import numpy as np
from torch.utils import data
from ..datasets.data_utils import load_hdf5
import h5py
from itertools import chain
import torch
class Dataset(data.Dataset):
def __init__(self, darray, with_weights=False):
self.darray = darray
self.with_weights = with_weights
def __getitem__(self, index):
label_index = -2 if self.with_weights else -1
X = self.darray[index, 0:label_index]
y = self.darray[index, label_index]
weight = self.darray[index, -1] if self.with_weights else 1.0
return X, y, weight
def __len__(self):
return self.darray.shape[0]
class DataGenerator(data.DataLoader):
def __init__(self, data_path, batch_size=32, shuffle=False, num_workers=1, with_weights=False, **kwargs):
if type(data_path) == list:
data_path = data_path[0]
data_array = load_hdf5(data_path)
self.with_weights = with_weights
self.dataset = Dataset(data_array, self.with_weights)
super(DataGenerator, self).__init__(dataset=self.dataset, batch_size=batch_size,
shuffle=shuffle, num_workers=num_workers)
self.num_blocks = 1
self.num_batches = int(np.ceil(len(data_array) * 1.0 / self.batch_size))
self.num_samples = len(data_array)
label_col = -2 if with_weights else -1  # the label column shifts left when a weight column is appended
self.num_positives = data_array[:, label_col].sum()
self.num_negatives = self.num_samples - self.num_positives
def __len__(self):
return self.num_batches
class DataBlockGenerator(object):
def __init__(self, data_block_list, batch_size=32, shuffle=False, with_weights=False, **kwargs):
# data_block_list: path list of data blocks
self.data_blocks = data_block_list
self.shuffle = shuffle
self.batch_size = batch_size
self.num_blocks = len(self.data_blocks)
self.num_batches, self.num_samples, self.num_positives, self.num_negatives \
= self.count_batches_and_samples()
self.with_weights = with_weights
def iter_block(self, data_block):
darray = load_hdf5(data_block, verbose=False)
label_index = -2 if self.with_weights else -1
X = torch.from_numpy(darray[:, 0:label_index])
y = torch.from_numpy(darray[:, label_index])
weight = torch.from_numpy(darray[:, -1]) if self.with_weights \
else torch.from_numpy(np.ones(y.shape))
block_size = len(y)
indexes = list(range(block_size))
if self.shuffle:
np.random.shuffle(indexes)
for idx in range(0, block_size, self.batch_size):
batch_index = indexes[idx:(idx + self.batch_size)]
yield X[batch_index], y[batch_index], weight[batch_index]
def __iter__(self):
if self.shuffle:
np.random.shuffle(self.data_blocks)
return chain.from_iterable(map(self.iter_block, self.data_blocks))
def __len__(self):
return self.num_batches
def count_batches_and_samples(self):
num_samples = 0
num_positives = 0
num_batches = 0
for block_path in self.data_blocks:
with h5py.File(block_path, 'r') as hf:
data_array = hf[list(hf.keys())[0]][:]
num_samples += len(data_array)
num_positives += np.sum(data_array[:, -1])
num_batches += int(np.ceil(len(data_array) * 1.0 / self.batch_size))
num_negatives = num_samples - num_positives
return num_batches, num_samples, num_positives, num_negatives
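# Minimal usage sketch (hypothetical file names; assumes each HDF5 file holds a single
# 2-D dataset whose last column is the label, or label then weight when with_weights=True,
# matching what Dataset.__getitem__ expects):
#
#   train_gen = DataGenerator("data/train_part0.h5", batch_size=256, shuffle=True)
#   for X, y, weight in train_gen:
#       pass  # X: [batch, num_features], y: [batch], weight: [batch]
#
#   block_gen = DataBlockGenerator(["data/part0.h5", "data/part1.h5"], batch_size=256)
#   for X, y, weight in block_gen:
#       pass  # blocks are streamed one HDF5 file at a time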
|
from rdflib import Graph, RDF
from IPython.core.display import display, HTML
import os
import json
import csv
import uuid
from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON, JSONLD, CSV, TSV, N3, RDF, RDFXML, TURTLE
import pandas as pds
import itertools
import numpy as np
from plotnine import *
__author__ = 'proccaserra (Philippe Rocca-Serra)'
# author: philippe rocca-serra (philippe.rocca-serra@oerc.ox.ac.uk)
# ontology: http://www.stato-ontology.org
def queryResultToHTMLTable(queryResult):
HTMLResult = '<table><tr style="color:white;background-color:#43BFC7;font-weight:bold">'
# print variable names
for varName in queryResult.vars:
HTMLResult = HTMLResult + '<td>' + varName + '</td>'
HTMLResult = HTMLResult + '</tr>'
# print values from each row
for row in queryResult:
HTMLResult = HTMLResult + '<tr>'
for column in row:
if column is not None and column != "":
HTMLResult = HTMLResult + '<td>' + str(column) + '</td>'
else:
HTMLResult = HTMLResult + '<td>' + "N/A" + '</td>'
HTMLResult = HTMLResult + '</tr>'
HTMLResult = HTMLResult + '</table>'
display(HTML(HTMLResult))
def get_sparql_variables(results, sparql_wrapper="SPARQLWrapper2"):
return results.vars if ("sparqlwrapper2" == sparql_wrapper.lower()) else results['head']['vars']
def get_sparql_bindings(results, sparql_wrapper="SPARQLWrapper2"):
return results.bindings if ("sparqlwrapper2" == sparql_wrapper.lower()) else results['results']['bindings']
def get_sparql_binding_variable_value(binding, variable, sparql_wrapper="SPARQLWrapper2"):
return binding[variable] if ("sparqlwrapper2" == sparql_wrapper.lower()) else binding[variable]['value']
def make_sparql_dict_list(bindings, variables, sparql_wrapper="SPARQLWrapper2"):
def binding_value(binding, var): # helper function for returning values
return get_sparql_binding_variable_value(binding, var, sparql_wrapper) if (var in binding) else None
dict_list = [] # list to contain dictionaries
for binding in itertools.chain(bindings):
values = [binding_value(binding, var) for var in itertools.chain(variables)]
dict_list.append(dict(zip(variables, values)))
return dict_list
def make_sparql_df(results, sparql_wrapper="SPARQLWrapper2"):
# modified from https://github.com/RDFLib/sparqlwrapper/issues/125
variables = get_sparql_variables(results, sparql_wrapper)
cleaned_variables=[str(var.replace('\\n','')) for var in variables]
# print(cleaned_variables)
bindings = get_sparql_bindings(results, sparql_wrapper)
# create a list of dictionaries to use as data for dataframe
data_list = make_sparql_dict_list(bindings, cleaned_variables, sparql_wrapper)
# print(data_list)
df = pds.DataFrame(data_list) # create dataframe from data list
df["sample_mean"] = df["sample_mean"].astype("float")
return df[cleaned_variables] # return dataframe with columns reordered
g = Graph()
g.parse("/Users/philippe/Documents/git/rose2018ng-notebook/data/processed/denovo/rdf/rose-aroma-ng-06-2018-full.ttl", format="n3")
# g.parse("./data/processed/denovo/rdf/rose-aroma-ng-06-2018-full.ttl", format="n3")
print(g)
get_idv_and_levels = g.query(
"""
PREFIX stato: <http://purl.obolibrary.org/obo/STATO_>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ncbitax: <http://purl.obolibrary.org/obo/NCBITaxon_>
PREFIX has_part: <http://purl.obolibrary.org/obo/BFO_0000051>
SELECT DISTINCT
?Predictor
?PredictorLevel
WHERE {
?var a stato:0000087 ;
rdfs:label ?Predictor ;
has_part: ?value .
?value rdfs:label ?PredictorLevel .
}
"""
)
# for row in get_idv_and_levels:
# print("%s knows %s" % row)
queryResultToHTMLTable(get_idv_and_levels)
get_replication_info = g.query("""
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
prefix chmo: <http://purl.obolibrary.org/obo/CHMO_>
prefix msio: <http://purl.obolibrary.org/obo/MSIO_>
prefix stato: <http://purl.obolibrary.org/obo/STATO_>
prefix obi: <http://purl.obolibrary.org/obo/OBI_>
prefix ro: <http://purl.obolibrary.org/obo/RO_>
prefix po: <http://purl.obolibrary.org/obo/PO_>
prefix has_member: <http://purl.obolibrary.org/obo/RO_0002351>
prefix has_value: <http://purl.obolibrary.org/obo/STATO_0000129>
prefix computed_from: <http://purl.obolibrary.org/obo/STATO_0000557>
prefix has_specified_input: <http://purl.obolibrary.org/obo/OBI_0000293>
prefix has_specified_output: <http://purl.obolibrary.org/obo/OBI_0000299>
prefix is_about: <http://purl.obolibrary.org/obo/IAO_0000136>
prefix is_specified_output_of: <http://purl.obolibrary.org/obo/OBI_0000295>
SELECT
?TreatmentGroup
(count(distinct ?member) as ?NbTechnicalReplicate)
(count(distinct ?input) as ?NbBiologicalReplicate)
WHERE {
?population a stato:0000193 ;
rdfs:label ?TreatmentGroup ;
has_member: ?member .
?member has_specified_input: ?input .
?mean a stato:0000402 ;
computed_from: ?population ;
has_value: ?MeanConcentration ;
is_about: ?ChemicalCompound .
?concentration a stato:0000072;
is_specified_output_of: ?assay ;
is_about: ?ChemicalCompound .
}
GROUP BY ?population
""")
queryResultToHTMLTable(get_replication_info)
# get_all_data = g.query("""
# prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
# prefix chmo: <http://purl.obolibrary.org/obo/CHMO_>
# prefix msio: <http://purl.obolibrary.org/obo/MSIO_>
# prefix stato: <http://purl.obolibrary.org/obo/STATO_>
# prefix obi: <http://purl.obolibrary.org/obo/OBI_>
# prefix ro: <http://purl.obolibrary.org/obo/RO_>
# prefix po: <http://purl.obolibrary.org/obo/PO_>
# prefix has_value: <http://purl.obolibrary.org/obo/STATO_0000129>
# prefix computed_from: <http://purl.obolibrary.org/obo/STATO_0000557>
# prefix is_about: <http://purl.obolibrary.org/obo/IAO_0000136>
# prefix is_denoted_by: <http://purl.obolibrary.org/obo/STATO_0000205>
# prefix derives_from: <http://purl.obolibrary.org/obo/RO_0001000>
# prefix located_in: <http://purl.obolibrary.org/obo/RO_0001025>
# prefix denotes: <http://purl.obolibrary.org/obo/IAO_0000219>
# prefix measured_in: <http://purl.obolibrary.org/obo/STATO_0000XYZ>
#
#
# SELECT DISTINCT ?chemical_name ?chebi_identifier ?inchi ?sample_mean ?sem ?treatment ?genotype ?organism_part
# WHERE {
# ?pop_mean a stato:0000402 ;
# is_about: ?chebi_identifier ;
# computed_from: ?population ;
# has_value: ?sample_mean .
# ?chem a ?chebi_identifier ;
# rdfs:label ?chemical_name ;
# is_denoted_by: ?inchi .
# ?semv a stato:0000037 ;
# denotes: ?pop_mean ;
# has_value: ?sem.
# ?population a stato:0000193 ;
# rdfs:label ?treatment .
# ?sub_conc a stato:0000072 ;
# derives_from: ?genotype ;
# located_in: ?organism_part;
# measured_in: ?population .
#
# }
# """)
#
# queryResultToHTMLTable(get_all_data)
#
# data = make_sparql_df(get_all_data)
# width = figure_size[0]
# height = figure_size[0] * aspect_ratio
# gray = '#666666'
# orange = '#FF8000'
# blue = '#3333FF'
#
# p1 = (ggplot(data)
# + aes('chemical_name','sample_mean',fill='factor(treatment)')
# + geom_col()
# + facet_wrap('~treatment', dir='v',ncol=1)
# + scale_y_continuous(expand = (0,0))
# + theme(axis_text_x=element_text(rotation=90, hjust=1, fontsize=6, color=blue))
# + theme(axis_text_y=element_text(rotation=0, hjust=2, fontsize=6, color=orange))
# + theme(figure_size = (8, 16))
# )
#
# p1 + theme(panel_background=element_rect(fill=blue)
# )
#
# p1
#
#
# ggsave(plot=p1, filename='./figures/denovo/Fig_3c-rose-aroma-naturegenetics2018-from-RDF.png', dpi=100)
#
|
"""dhcp.py"""
import logging
import string
import time
import binascii
from random import randint, choice
from scapy.config import conf
#conf.use_pcap = True
conf.verb = 0
from scapy.arch import linux, pcapdnet
from scapy.arch.pcapdnet import *
#conf.L3socket = linux.L2Socket
from scapy.all import Ether, Dot1Q, IP, UDP, BOOTP, DHCP
from scapy.automaton import *
import packetqueue
global PACKET_QUEUE
PACKET_QUEUE = None
logging.getLogger("scapy").setLevel(1)
logger = logging.getLogger(__name__)
class DHCPClient(Automaton):
'''
Scapy Automaton implementing a simple DHCP client state machine
(Discover -> Offer -> Request -> Ack, plus Renewing/Rebinding and Release on stop).
'''
BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff'
BROADCAST_IP = '255.255.255.255'
DEFAULT_IP = '0.0.0.0'
BOOTCLIENT = 68
BOOTSERVER = 67
DEBUF_RENEW_TIME = 30
def __setattr__(self, name, value):
logger.debug("Value: %s updated to: %s", name, value)
super(DHCPClient, self).__setattr__(name, value)
@staticmethod
def mac_decode(mac_address):
'''Strip ':' or '.' separators from a MAC address and decode it to raw bytes.'''
new_mac = mac_address.replace(":", "").replace(".", "")
logger.debug("Stripped mac_address, old: %s new: %s", mac_address, new_mac)
return new_mac.decode('hex')
@staticmethod
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(choice(chars) for _ in range(size))
@staticmethod
def random_int():
return randint(0, 2**32-1)
@staticmethod
def pad_zero(data):
if len(data) < 2:
data = '0' + data
return data
@staticmethod
def encode_string(string_data):
temp = []
for char in string_data:
new_hex = '{:x}'.format(ord(char))
temp.append(DHCPClient.pad_zero(new_hex))
length = DHCPClient.pad_zero('{:x}'.format(len(temp)))
return length + ''.join(temp)
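# Example (derived from the code above): DHCPClient.encode_string('AB') returns '024142',
# i.e. a one-byte hex length ('02') followed by the hex encoding of each character.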
def start_server(self):
self.runbg()
def stop_server(self):
self.stop_state = True
PACKET_QUEUE.stop()
def server_status(self):
return self.server_state
def _server_update_state(self, server_state):
self._state_update_parent(server_state)
self.server_state = server_state
def _state_update_parent(self, server_state):
''' Override with parent class method to update state'''
pass
def parse_args(self, interface, mac_address, hostname=None, broadcast=False,
early_renew=0, early_rebind=0,
no_renew=False, quick_start=False, dhcp_options=[],
vlan_tags=[], option82=None, dsl_sub_options=[], debug=100, **kargs):
self.send_socket_kargs = {}
Automaton.parse_args(self, **kargs)
self.debug_level = 2
#self.socket_kargs["ll"] = conf.L2socket
self.interface = interface
conf.iface = self.interface
global PACKET_QUEUE
if not PACKET_QUEUE:
PACKET_QUEUE = packetqueue.PacketQueue(iface=conf.iface)
self.send_sock_class = conf.L2socket
self.recv_sock_class = packetqueue.DHCPListenSocket
#self.send_sock_class = pcapdnet.L2pcapSocket
#self.recv_sock_class = pcapdnet.L2pcapListenSocket
self.send_socket_kargs['iface'] = self.interface
self.mac_address = mac_address
self.hostname = hostname
self.broadcast = broadcast
self.early_renew = early_renew
self.early_rebind = early_rebind
self.no_renew = no_renew
self.quick_start = quick_start
self.dhcp_options = dhcp_options
self.vlan_tags = vlan_tags
self.option82 = option82
self.dsl_sub_options = dsl_sub_options
if not self.hostname: self.hostname = DHCPClient.id_generator()
self.logger = logging.getLogger(self.hostname)
self.xid = 0
self.flags = 0
self.t1 = 0
self.t2 = 0
self.siaddr = '0.0.0.0'
self.yiaddr = '0.0.0.0'
self.ciaddr = '0.0.0.0'
self.renew_attempts = 0
self.rebind_attempts = 0
self.stop_state = False
self.server_state = 'Stopped'
if self.broadcast: self.flags = 32768
self.raw_mac = DHCPClient.mac_decode(self.mac_address)
self.logger.debug("Timeout for states are: %s", self.timeout)
def my_send(self, pkt):
self.send_sock.send(pkt)
def master_filter(self, pkt):
'''Accept only BOOTP replies that match our transaction id and were not sent from our own MAC.'''
return ( Ether in pkt and pkt[Ether].src != self.mac_address and (BOOTP in pkt and pkt[BOOTP].xid == self.xid) )
def get_dot1q(self, vlan):
return Dot1Q(vlan=vlan)
def get_option82(self):
send = False
if self.option82:
hex_subscriber_id = binascii.unhexlify('01' + DHCPClient.encode_string(self.option82))
hex_remote_id = binascii.unhexlify('02' + DHCPClient.encode_string('BRASTEST'))
send = True
else:
hex_subscriber_id = ''
hex_remote_id = ''
if len(self.dsl_sub_options) == 2:
sup_option_header = binascii.unhexlify('0911' + '{0:08X}'.format(3561) + '0C')
actual_up = binascii.unhexlify('8104' + '{0:08X}'.format(self.dsl_sub_options[0]))
actual_down = binascii.unhexlify('8204' + '{0:08X}'.format(self.dsl_sub_options[1]))
send = True
else:
sup_option_header = ''
actual_up = ''
actual_down = ''
if send:
return [('relay_agent_Information', hex_subscriber_id + hex_remote_id + sup_option_header + actual_up + actual_down)]
return []
def dhcp_add_options(self, header_options):
self.logger.debug("dhcp options ")
try:
full_options = header_options + self.dhcp_options + self.get_option82() + ['end']
except Exception:
self.logger.exception("Failed to build DHCP options")
raise
self.logger.debug("dhcp options %s", full_options)
return DHCP(options=full_options)
def get_l2_transport(self, src_mac, dst_mac):
ethernet = Ether(src=src_mac, dst=dst_mac)
for vlan in self.vlan_tags:
ethernet = ethernet / self.get_dot1q(vlan)
return ethernet
def get_transport(self, src_mac, dst_mac, src_ip, dst_ip):
ethernet = self.get_l2_transport(src_mac, dst_mac)
ip_header = IP(src=src_ip, dst=dst_ip)
udp_header = UDP(sport=self.BOOTCLIENT, dport=self.BOOTSERVER)
return ethernet/ip_header/udp_header
# State machine.
#INIT - Init
@ATMT.state(initial=1)
def Init(self):
'''Send a DHCP Discover and move to the Selecting state.'''
if self.stop_state: raise self.unbound_end()
self._server_update_state("Unbound")
self.logger.info("DHCP Client started for MAC %s", self.mac_address)
l2_transport = self.get_transport(self.mac_address,
self.BROADCAST_MAC,
self.DEFAULT_IP,
self.BROADCAST_IP)
self.xid = DHCPClient.random_int()
self.logger.info("XID set to: %s", self.xid)
self.listen_sock = packetqueue.DHCPListenSocket(xid=self.xid, packet_queue_class=PACKET_QUEUE)
if self.quick_start:
logging.debug("Quick startup enabled, skipping random desync")
else:
desync_time = randint(1,30)
logging.debug("Waiting for desync time to expire in %ss", desync_time)
time.sleep(desync_time)
logging.debug("desync time expired, Sending Discover")
bootp_header = BOOTP(flags=self.flags,chaddr=self.raw_mac,xid=self.xid)
dhcp_header = self.dhcp_add_options([('message-type', 'discover')])
packet = l2_transport/bootp_header/dhcp_header
self.logger.info("Sending Discover: %s", packet.sprintf('%Ether.src% > %Ether.dst% %Dot1Q.vlan% %IP.src% > %IP.dst% %BOOTP.xid%'))
self.logger.debug("Sending Discover: %s", packet.show(dump=True))
self.send(packet)
raise self.Selecting()
@ATMT.state()
def Rebooting(self):
self.siaddr = '0.0.0.0'
self.yiaddr = '0.0.0.0'
self.ciaddr = '0.0.0.0'
raise self.Init()
#SELECTING - Selecting
@ATMT.state()
def Selecting(self):
self.logger.info("Moved to state Selecting")
@ATMT.timeout(Selecting, 15)
def Selecting_timeout(self):
self.logger.info("No repsonse back in 15 seconds heading back to Init state")
raise self.Init()
@ATMT.state()
def Requesting(self):
self.logger.info("Moved to state Requesting")
l2_transport = self.get_transport(self.mac_address,
self.BROADCAST_MAC,
self.DEFAULT_IP,
self.BROADCAST_IP)
bootp_header = BOOTP(flags=self.flags,chaddr=self.raw_mac,xid=self.xid)
dhcp_header = DHCP(options=[("message-type","request"),
("server_id",self.siaddr),
("requested_addr",self.yiaddr),
("hostname",self.hostname),
("param_req_list","pad"),
"end"])
for option in self.dhcp_options:
dhcp_header.options.append(option)
packet = l2_transport/bootp_header/dhcp_header
self.logger.info("Requesting: %s", packet.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
self.logger.debug("Requesting: %s", packet.show(dump=True))
self.send(packet)
@ATMT.state()
def Bound(self):
self._server_update_state("Bound")
self.logger.info("Moved to state Bound with ip: %s", self.ciaddr)
time_now = time.time()
while time_now < self.lease_expire_time:
if self.stop_state: raise self.bound_end()
if not self.broadcast or not self.no_renew:
if self.early_renew > 0 and self.early_renew < self.t1:
if time_now > self.early_renew_expire_time:
raise self.Renewing()
if time_now > self.t1_expire_time:
raise self.Renewing()
if time_now > self.t2_expire_time:
raise self.Rebinding()
elif (self.early_rebind > 0 and self.early_rebind < self.t2) and time_now > self.early_rebind_expire_time:
raise self.Rebinding()
time.sleep(1)
time_now = time.time()
raise self.Rebooting()
@ATMT.state()
def Renewing(self):
self.logger.info("Moved to state Renewing")
back_off_time = randint(1, self.DEBUF_RENEW_TIME) * self.renew_attempts
self.logger.info("Backing off %ss", back_off_time)
time.sleep(back_off_time)
l2_transport = self.get_transport(self.mac_address,
self.server_mac,
self.yiaddr,
self.siaddr)
bootp_header = BOOTP(flags=self.flags,ciaddr=self.yiaddr,chaddr=self.raw_mac,xid=self.xid)
dhcp_header = DHCP(options=[("message-type","request"),
("hostname",self.hostname),
"end"])
packet = l2_transport/bootp_header/dhcp_header
self.logger.info("Renewing: %s", packet.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
self.logger.debug("Renewing: %s", packet.show(dump=True))
self.send(packet)
self.renew_attempts += 1
@ATMT.state()
def Rebinding(self):
self.logger.info("Moved to state Rebinding")
back_off_time = randint(1, self.DEBUF_RENEW_TIME) * self.rebind_attempts
self.logger.debug("Backing off %ss", back_off_time)
time.sleep(back_off_time)
l2_transport = self.get_transport(self.mac_address,
self.BROADCAST_MAC,
self.yiaddr,
self.BROADCAST_IP)
bootp_header = BOOTP(flags=self.flags,ciaddr=self.yiaddr,chaddr=self.raw_mac,xid=self.xid)
dhcp_header = DHCP(options=[("message-type","request"),
("hostname",self.hostname),
"end"])
packet = l2_transport/bootp_header/dhcp_header
self.logger.info("Rebinding: %s", packet.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
self.logger.debug("Rebinding: %s", packet.show(dump=True))
self.send(packet)
self.rebind_attempts += 1
@ATMT.timeout(Requesting, 30)
def Requesting_timeout(self):
self.logger.info("No repsonse back in 10 seconds heading back to Init state")
raise self.Init()
@ATMT.timeout(Renewing, 5)
def waiting_renewing_response_timeout(self):
self.logger.info("No repsonse back in 5 seconds heading back to Bound state")
raise self.Bound()
@ATMT.timeout(Rebinding, 5)
def waiting_rebinding_response_timeout(self):
self.logger.info("No repsonse back in 5 seconds heading back to Bound state")
raise self.Bound()
# State conditions and actions.
@ATMT.receive_condition(Selecting)
def received_offer(self, pkt):
self.last_pkt = pkt
self.logger.debug("Selecting condition")
raise self.Requesting()
@ATMT.receive_condition(Requesting)
def recieved_packet_request(self, pkt):
self.last_pkt = pkt
raise self.Bound()
@ATMT.receive_condition(Bound)
def recieved_packet_bound(self, pkt):
self.last_pkt = pkt
raise self.Bound()
@ATMT.receive_condition(Renewing)
def recieved_packet_renewing(self, pkt):
self.last_pkt = pkt
raise self.Bound()
@ATMT.receive_condition(Rebinding)
def recieved_packet_rebinding(self, pkt):
self.last_pkt = pkt
raise self.Bound()
@ATMT.action(received_offer)
@ATMT.action(recieved_packet_request)
@ATMT.action(recieved_packet_bound)
@ATMT.action(recieved_packet_renewing)
@ATMT.action(recieved_packet_rebinding)
def recieved_packet(self):
pkt = self.last_pkt
if (UDP in pkt and BOOTP in pkt):
self.logger.info("recieved_packet: %s", pkt.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
self.logger.debug("recieved_packet: %s", pkt.show(dump=True))
if pkt[BOOTP].xid != self.xid:
self.logger.warning("XID does not match! going to Init state, packet=%s, us=%s", pkt[BOOTP].xid, self.xid)
elif ("message-type", 2) in pkt[DHCP].options: # OFFER
self.siaddr = pkt[BOOTP].siaddr
self.yiaddr = pkt[BOOTP].yiaddr
self.server_mac = pkt[Ether].src
for opt in pkt[DHCP].options:
if opt[0] == 'server_id':
self.siaddr = opt[1]
raise self.Requesting()
elif ("message-type", 5) in pkt[DHCP].options: # ACK
time_now = time.time()
self.ciaddr = self.yiaddr
for opt in pkt[DHCP].options:
if opt[0] == 'renewal_time':
self.t1 = int(opt[1])
elif opt[0] == 'rebinding_time':
self.t2 = int(opt[1])
elif opt[0] == 'lease_time':
self.lease_time = int(opt[1])
self.t1_expire_time = time_now + self.t1
self.early_renew_expire_time = time_now + self.early_renew
self.t2_expire_time = time_now + self.t2
self.lease_expire_time = time_now + self.lease_time
self.early_rebind_expire_time = time_now + self.early_rebind
self.rebind_attempts = 0
self.renew_attempts = 0
raise self.Bound()
elif ("message-type", 6) in pkt[DHCP].options: # NACK
self.logger.info("Got NACK Rebooting")
self._update_state("Unbound")
raise self.Rebooting()
self.logger.error("Packet was fucked")
@ATMT.state()
def bound_end(self):
self.logger.debug("Moved to state Bounded Ending")
l2_transport = self.get_transport(self.mac_address,
self.server_mac,
self.yiaddr,
self.siaddr)
bootp_header = BOOTP(flags=self.flags,ciaddr=self.yiaddr,chaddr=self.raw_mac,xid=self.xid)
dhcp_header = DHCP(options=[("message-type","release"),
("hostname",self.hostname),
"end"])
packet = l2_transport/bootp_header/dhcp_header
self.logger.info("Bound Ending: %s", packet.sprintf('%Ether.src% > %Ether.dst% VLAN:%Dot1Q.vlan% %IP.src% > %IP.dst% BOOTPXID:%BOOTP.xid%'))
self.logger.debug("Bound End: %s", packet.show(dump=True))
self.send(packet)
raise self.END()
@ATMT.state()
def unbound_end(self):
raise self.END()
@ATMT.state(final=1)
def END(self):
self._server_update_state("Stopped")
self.logger.info("Client stopped")
if __name__ == "__main__":
import sys
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
a = DHCPClient(sys.argv[1], sys.argv[2], quick_start=True, vlan_tags=[2001], option82='AVC999904444404')
try:
a.start_server()
while True:
pass
except KeyboardInterrupt:
a.stop_server()
while True:
if a.server_status() == "Stopped":
sys.exit()
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from django_addons_formlib import __version__
setup(
name='django-addons-formlib',
version=__version__,
description='django-addons Framework form library',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/django-addons/django-addons-formlib',
packages=find_packages(),
python_requires='>=3.6',
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python :: 3.6',
'Framework :: Django :: 2.0',
],
)
|
# %%
import pandas as pd
import numpy as np
import pathlib
import matplotlib.pyplot as plt
from scipy.stats.mstats import gmean
from our_plot_config import derived_dir, setplotstyle, fig_dir
setplotstyle()
# %%
# Input file
f_kappas = derived_dir / 'official-kappas.parquet'
f_firms = derived_dir / 'firm-info.parquet'
# temp output - for macro simulations
f_quarter_mean = derived_dir / 'tmp-quarter-mean.pickle'
# Figures
# Kappa
f_profitweights = fig_dir / 'figure1_kappa.pdf'
f_profitweights_all = fig_dir / 'figure13_kappa_control.pdf'
f_within_between = fig_dir / 'figure15_within_between.pdf'
f_kappa_quantile = fig_dir / 'appfigure_a6.pdf'
# Concentration
f_ihhi = fig_dir / 'figure6_ihhi.pdf'
f_cosine = fig_dir / 'figure7_cosine.pdf'
f_chhi = fig_dir / 'figure14_chhi1.pdf'
f_chhi2 = fig_dir / 'figure14_chhi2.pdf'
# Tunneling
f_tunneling = fig_dir / 'figure9_tunneling.pdf'
f_kap1 = fig_dir / 'appfigure_a7.pdf'
# compute weighted average for kappa with different weighting schemes
def weighted(x, cols):
a1 = np.average(x[cols].values, weights=x['w_amean'].values, axis=0)[0]
a2 = np.average(x[cols].values, weights=x['w_gmean'].values, axis=0)[0]
a3 = np.average(x[cols].values, weights=x['mkt_cap_to'].values, axis=0)[0]
a4 = np.average(
x[cols].values,
weights=x['mkt_cap_from'].values,
axis=0)[0]
a5 = np.average(x[cols].values, weights=x['saleq_x'].values, axis=0)[0]
a6 = np.average(x[cols].values, weights=x['saleq_y'].values, axis=0)[0]
a7 = np.average(x[cols].values, weights=x['w_s_gmean'].values, axis=0)[0]
return pd.Series({'kappa_amean': a1, 'kappa_gmean': a2, 'kappa_from': a3, 'kappa_to': a4,
'kappa_sale_from': a5, 'kappa_sale_to': a6, 'kappa_sale_mean': a7})
# ### Read in the (Cleaned) Parquet File
# - Apply the $\kappa$ calculations period by period
# - Save the output to a new parquet file
# %%
df = pd.read_parquet(f_kappas)
df_firm = pd.read_parquet(f_firms)
ihhi = df_firm[['permno', 'quarter', 'ihhi', 'siccd', 'saleq']]
# merge to get weights (sales and market cap, from/to)
total_df = pd.merge(
pd.merge(
df, ihhi, left_on=[
'from', 'quarter'], right_on=[
'permno', 'quarter'], how='left'),
ihhi, left_on=['to', 'quarter'], right_on=['permno', 'quarter'], how='left')
total_df['same_sic'] = (total_df['siccd_x'] == total_df['siccd_y'])
total_df = total_df[total_df['from'] != total_df['to']]  # drop self-pairs (from == to)
# Average of weights
total_df['w_amean'] = (total_df['mkt_cap_from'] + total_df['mkt_cap_to']) / 2.0
total_df['w_gmean'] = gmean(
[total_df['mkt_cap_from'], total_df['mkt_cap_to']], axis=0)
total_df['w_s_gmean'] = gmean(
[total_df['saleq_x'], total_df['saleq_y']], axis=0)
# Apply the weighted averages
y = total_df.groupby(['quarter']).apply(weighted, ["kappa"])
qtr_mean = pd.concat([total_df.groupby(['quarter']).mean(), y], axis=1)
df_cosine = total_df.groupby(
['quarter'])['cosine'].describe(
percentiles=[
0.05, 0.25, 0.5, 0.75, 0.95])
# Percentiles of Kappa and IHHI
kappa_pct = df.groupby(
['quarter'])['kappa'].describe(
percentiles=[
0.05,
0.25,
0.5,
0.75,
0.95])
ihhi_pct = ihhi[~ihhi.ihhi.isnull()].groupby(['quarter'])['ihhi'].describe(
percentiles=[0.05, 0.25, 0.5, 0.75, 0.95])
# drop k_ff =1 cases for tunneling
tunnel_df = (df[df['from'] != df['to']].set_index('quarter')[
['kappa_sqrt', 'kappa', 'kappa_pow2', 'kappa_pow3']] > 1).groupby(level=0).mean()
tunnel_df2 = (df[df['from'] != df['to']].set_index(['from', 'quarter'])[
['kappa_sqrt', 'kappa', 'kappa_pow2', 'kappa_pow3']] > 1).groupby(level=[0, 1]).max()
# %%
# need this for the macro simulations
qtr_mean.to_pickle(f_quarter_mean)
# %%
# ### Kappas
# - Single Kappa ( Figure 1)
# - Alternative Control (Figure 13)
# - Within and Between Industry (Figure 15)
# Alternate Figure 1 (revision)
plt.clf()
qtr_mean[['kappa', 'kappa_gmean', 'kappa_sale_mean']].plot(figsize=(20, 10))
plt.legend(['Equal Weights', 'Market Cap Weighted', 'Revenue Weighted'])
plt.xlabel('')
plt.ylabel(r"$\kappa$ weight")
plt.ylim(0, 1)
plt.savefig(f_profitweights, bbox_inches="tight")
# %%
# Appendix Figure 13
plt.clf()
qtr_mean[['kappa', 'kappa_sqrt', 'kappa_pow2',
'kappa_pow3']].plot(figsize=(20, 10))
#plt.title("Average Pairwise Profit Weights $(\kappa)$ Under Different Control Assumptions")
plt.xlabel("")
plt.ylabel(r"$\kappa$ weight")
plt.ylim(0, 1)
plt.legend([r'$\gamma = \beta$',
r'$\gamma \propto \sqrt{\beta}$',
r'$\gamma \propto \beta^2$',
r'$\gamma \propto \beta^3$'])
plt.savefig(f_profitweights_all, bbox_inches="tight")
# %%
# Figure 15: Within Between
plt.clf()
total_df[(total_df.same_sic == True)].groupby(
['quarter'])['kappa'].mean().plot(figsize=(20, 10))
total_df[(total_df.same_sic == False)].groupby(
['quarter'])['kappa'].mean().plot()
#plt.title("Average Pairwise Profit Weights $(\kappa)$ Within and Between SIC code")
plt.xlabel("")
plt.ylabel(r"$\kappa$ weight")
plt.ylim(0, 1)
plt.legend([r"$\kappa$ same SIC", r"$\kappa$ different SIC"])
plt.savefig(f_within_between, bbox_inches="tight")
# %%
# Response Quantiles of Kappa
plt.clf()
kappa_pct[['95%', '75%', '50%', '25%', '5%']].plot(figsize=(20, 10))
plt.legend(['95th percentile',
'75th percentile',
'50th percentile',
'25th percentile',
'5th percentile'])
plt.ylabel(r"$\kappa$ Quantiles")
plt.xlabel("")
plt.ylim(0, 1)
plt.savefig(f_kappa_quantile, bbox_inches="tight")
# %%
# ### Concentration
# - IHHI (Figure 6)
# - Cosine Similarity (Figure 7)
# - CHHI (Figure 14 - 2 parts)
# Figure 6
ihhi_pct[['95%', '75%', '50%', '25%', '5%']].plot(figsize=(20, 10))
plt.legend(['95th percentile',
'75th percentile',
'50th percentile',
'25th percentile',
'5th percentile'])
plt.ylabel("Investor HHI")
plt.xlabel("")
plt.ylim(0, 600)
plt.savefig(f_ihhi, bbox_inches="tight")
# %%
# Figure 7
total_df.groupby(['quarter'])[['kappa', 'cosine', 'l1_measure']
].mean().plot(figsize=(20, 10))
plt.xlabel("")
#plt.title("Cosine Similarity and $\kappa$")
plt.ylim(0, 1)
plt.legend([r'$\kappa_{f,g}$',
r'$L_2$ similarity $cos(\beta_f,\beta_g)$',
r'$L_1$ similarity $|\beta_f - \beta_g|$'])
plt.savefig(f_cosine, bbox_inches="tight")
# %%
# Figure 14a
df_firm[['quarter', 'ihhi', 'chhi_05', 'chhi_2', 'chhi_3', 'chhi_4']
].groupby(['quarter']).mean().plot(figsize=(20, 10))
plt.xlabel("")
plt.ylabel("Effective Control HHI")
plt.ylim(0, 3500)
plt.legend([r'$\gamma = \beta$',
r'$\gamma \propto \sqrt{\beta}$',
r'$\gamma \propto \beta^2$',
r'$\gamma \propto \beta^3$',
r'$\gamma \propto \beta^4$'])
plt.savefig(f_chhi, bbox_inches="tight")
# %%
# Figure 14b
df_firm[['quarter', 'ihhi', 'chhi_05']].groupby(
['quarter']).mean().plot(figsize=(20, 10))
plt.xlabel("")
plt.ylabel("Effective Control HHI")
plt.ylim(0, 350)
plt.legend([r'$\gamma = \beta$', r'$\gamma \propto \sqrt{\beta}$', ])
plt.savefig(f_chhi2, bbox_inches="tight")
# ### Tunneling
# - Figure 9: Tunneling
# - App Figure C-6: Tunneling (Alternative Control)
# %%
(100.0 * tunnel_df[['kappa']]).plot(figsize=(20, 10))
plt.xlabel("")
#plt.title("Potential Tunneling")
plt.ylabel(r"Percentage of $\kappa$ > 1")
plt.legend('')
plt.ylim(0, 12)
#plt.legend([r'$\gamma = \beta$',r'$\gamma \propto \sqrt{\beta}$',r'$\gamma \propto \beta^2$',r'$\gamma \propto \beta^3$'])
plt.savefig(f_tunneling, bbox_inches="tight")
# %%
(100.0 * tunnel_df[['kappa', 'kappa_sqrt',
'kappa_pow2', 'kappa_pow3']]).plot(figsize=(20, 10))
plt.xlabel("")
#plt.title("Potential Tunneling")
plt.ylabel(r"Percentage of $\kappa$ > 1")
plt.ylim(0, 20)
plt.legend([r'$\gamma = \beta$',
r'$\gamma \propto \sqrt{\beta}$',
r'$\gamma \propto \beta^2$',
r'$\gamma \propto \beta^3$'])
plt.savefig(f_kap1, bbox_inches="tight")
|
# head percolation table
from pyparsing import OneOrMore, nestedExpr
def all_left(label, child_labels):
return 0
def all_right(label, child_labels):
return len(child_labels) - 1
def ftb(label, children):
for rule in ftb_rules:
if label == rule[0] or rule[0] == "*":
for subrule in rule[1:]:
if subrule[0] == "first":
r = range(len(children))
elif subrule[0] == "last":
r = range(len(children) - 1, 0 - 1, -1)
else:
raise RuntimeError("Unknown direction: %s" % subrule[0])
for i in r:
if children[i] in subrule[1:] or subrule[1:][0] == "*":
return i
# print(label, children)
# raise RuntimeError("Rule did not match anything!")
# there is * rule so this should not happen
raise RuntimeError("Unmatched label: %s" % label)
ftb_rules = OneOrMore(nestedExpr(ignoreExpr=None)).parseString("""(
(S1 (first SENT) )
(PONCT (last *) )
(Sint (last VN) (last AP) (last NP) (last PP) (last VPinf)
(last Ssub) (last VPpart) (last A ADJ ADJWH) (last ADV
ADVWH) )
(VPpart (first VPR VPP) (first VN) )
(SENT (last VN) (last AP) (last NP) (last Srel) (last
VPpart) (last AdP) (last I) (last Ssub) (last VPinf) (last PP)
(last ADV ADVWH) )
(COORD (first CS CC PONCT) )
(AP (last A ADJ ADJWH) (last ET) (last VPP) (last ADV
ADVWH) )
(NP (first NPP PROREL PRO NC PROWH) (first NP)
(first A ADJ ADJWH) (first AP) (first I) (first VPpart) (first
ADV ADVWH) (first AdP) (first ET) (first DETWH DET)
)
(VPinf (first VN) (first VIMP VPR VS VINF V VPP) )
(PP (first P) (first P+D) (first NP P+PRO) )
(Ssub (last VN) (last AP) (last NP) (last PP) (last VPinf)
(last Ssub) (last VPpart) (last A ADJ ADJWH) (last ADV
ADVWH) )
(VN (last VIMP VPR VS VINF V VPP) (last VPinf) )
(Srel (last VN) (last AP) (last NP) )
(AdP (last ADV ADVWH) )
(* (first *) )
)""").asList()[0]
tables = {
"first": all_left,
"last": all_right,
"ftb": ftb
}
def extract_dependencies(node, callback):
words = list()
head_dict = dict()
root = extract_head(node, words, head_dict, callback)
head_dict[root] = -1
heads = [-2] * len(words)
for mod, head in head_dict.items():
heads[mod] = head
if any(h == -2 for h in heads):
raise RuntimeError("Error during lexicalization!")
return heads
def extract_head(node, words, heads, callback):
if len(node) == 0:
raise IOError("Node without label")
if len(node) == 1:
raise RuntimeError("Should not happen: leaf nodes must be parsed at the same time as the POS tag. Is there missing POS tags in the data?")
else:
if len(node) == 2 and type(node[1]) == str:
# this is a POS tag node
words.append(node[1])
return len(words) - 1
else:
label = node[0]
child_heads = []
child_labels = []
for child in node[1:]:
child_heads.append(extract_head(child, words, heads, callback))
child_labels.append(child[0])
if len(child_heads) == 1:
return child_heads[0]
else:
head = child_heads[callback(label, child_labels)]
for i, modifier in enumerate(child_heads):
heads[modifier] = head
return head
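# Minimal self-check (hypothetical sentence; the bracketed tree uses the same
# (POS word) leaf convention as the FTB rules above):
if __name__ == "__main__":
    example = OneOrMore(nestedExpr(ignoreExpr=None)).parseString(
        "(S1 (SENT (NP (DET le) (NC chat)) (VN (V dort))))").asList()[0]
    # FTB head percolation: 'le' -> 'chat', 'chat' -> 'dort', 'dort' -> root
    print(extract_dependencies(example, tables["ftb"]))  # expected: [1, 2, -1]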
|
"""Wrapper to make the a1 environment suitable for OpenAI gym."""
import gym
from mpi4py import MPI
from motion_imitation.envs import env_builder
from motion_imitation.robots import a1
from motion_imitation.robots import robot_config
class A1GymEnv(gym.Env):
"""A1 environment that supports the gym interface."""
metadata = {'render.modes': ['rgb_array']}
def __init__(self,
action_limit=(0.75, 0.75, 0.75),
render=False,
on_rack=False):
num_procs = MPI.COMM_WORLD.Get_size()
self._env = env_builder.build_imitation_env(motion_files=['motion_imitation/data/motions/dog_pace.txt'],
num_parallel_envs=num_procs,
mode='train',
enable_randomizer=True,
enable_rendering=True)
# self._env = env_builder.build_regular_env(
# a1.A1,
# motor_control_mode=robot_config.MotorControlMode.POSITION,
# enable_rendering=render,
# action_limit=action_limit,
# on_rack=on_rack)
self.observation_space = self._env.observation_space
self.action_space = self._env.action_space
def step(self, action):
return self._env.step(action)
def reset(self):
return self._env.reset()
def close(self):
self._env.close()
def render(self, mode):
return self._env.render(mode)
def __getattr__(self, attr):
return getattr(self._env, attr)
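# Note: as written, build_imitation_env above always enables rendering, and the
# render/action_limit/on_rack constructor arguments only apply to the commented-out
# build_regular_env path.
# Minimal usage sketch (assumes the motion_imitation package and its
# data/motions/dog_pace.txt file are available and the script is launched under MPI,
# e.g. `mpiexec -n 1 python ...`):
#
#   env = A1GymEnv()
#   obs = env.reset()
#   for _ in range(100):
#       obs, reward, done, info = env.step(env.action_space.sample())
#       if done:
#           obs = env.reset()
#   env.close()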
|
# MIT License
# Copyright (c) 2022 christiandimaio
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##### INTERFACE CLASS: do not instantiate it directly (use it at most as a type hint); read it as documentation of the expected decoder API.
################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple,List
class IDecoder(nn.Module):
"""
Interface class for an LSTM-based decoder.
The arguments below are suggestions, not a fixed signature.
"""
def __init__(self, *args):
"""Define the interface of a generic constructor for the Decoder Net.
Args (Suggested):
hidden_dim (int):
The Capacity of the LSTM Cell.
padding_index (int):
The index of the padding id, given from the vocabulary associated to the dataset.
vocab_size (int):
The size of the vocabulary associated to the dataset.
embedding_dim (int):
The number of features associated to a word.
device (str, optional): Default "cpu"
The device on which the operations will be performed.
"""
super(IDecoder, self).__init__()
def forward(self, *args) -> Tuple[torch.Tensor, List[int]]:
"""Interface for the forward operation of the RNN.
Args (Suggested):
images (torch.Tensor): `(batch_dim, encoder_dim)`
The features associated to each image of the batch.
captions (torch.Tensor): `(batch_dim, max_captions_length, embedding_dim)`
The caption associated to each image of the batch.
_REMARK Each caption is in the full form: <START> + .... + <END>_
caption_length (list(int)):
The length of each caption in the batch.
Returns: `[(batch_size, max_captions_length, vocab_size), list(int)]`
(torch.Tensor): The hidden state of each time step from t_1 to t_N.
(list(int)): The length of each decoded caption.
REMARK The <START> is provided as input at t_0.
REMARK The <END> token will be removed from the input of the LSTM.
"""
pass
def generate_caption(self, *args) -> torch.Tensor:
""" Interface for generate a caption
Args (Suggested):
images (torch.Tensor): `(1, encoder_dim)`
The features associated to the image.
max_caption_length (int):
The maximum admissible length of the caption.
Returns:
(torch.Tensor): `(1, <variable>)`
The caption associated to the image given.
REMARK It includes <START> at t_0 by default.
"""
pass
|
'''Get item'''
import json
import logging
import os
import boto3
from tenacity import retry, stop_after_delay, wait_random_exponential
log_level = os.environ.get('LOG_LEVEL', 'INFO')
logging.root.setLevel(logging.getLevelName(log_level)) # type: ignore
_logger = logging.getLogger(__name__)
# DynamoDB
DDB_TABLE_NAME = os.environ.get('DDB_TABLE_NAME')
DDB_TABLE_HASH_KEY = os.environ.get('DDB_TABLE_HASH_KEY')
dynamodb = boto3.resource('dynamodb')
DDT = dynamodb.Table(DDB_TABLE_NAME)
@retry(wait=wait_random_exponential(), stop=stop_after_delay(15))
def _get_item(item_id):
'''Get a record item from DynamoDB'''
result = DDT.get_item(
Key={
DDB_TABLE_HASH_KEY: item_id
}
)
return result.get('Item', {})
def handler(event, context):
'''Function entry'''
_logger.debug('Event received: {}'.format(json.dumps(event)))
item_id = event['pathParameters']['id']
item = _get_item(item_id)
resp = {
'statusCode': 200,
'body': json.dumps(item)
}
_logger.debug('Response: {}'.format(json.dumps(resp)))
return resp
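# Local invocation sketch (hypothetical values; the event mirrors the API Gateway
# proxy format this handler expects, and DDB_TABLE_NAME / DDB_TABLE_HASH_KEY must
# point at a real DynamoDB table for _get_item to succeed):
#
#   event = {'pathParameters': {'id': 'item-123'}}
#   print(handler(event, None))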
|
from codalab.bundles.dataset_bundle import DatasetBundle
from codalab.bundles.make_bundle import MakeBundle
from codalab.bundles.program_bundle import ProgramBundle
from codalab.bundles.run_bundle import RunBundle
from codalab.bundles.private_bundle import PrivateBundle
BUNDLE_SUBCLASSES = (DatasetBundle, MakeBundle, ProgramBundle, RunBundle, PrivateBundle)
BUNDLE_TYPE_MAP = {cls.BUNDLE_TYPE: cls for cls in BUNDLE_SUBCLASSES}
assert len(BUNDLE_TYPE_MAP) == len(BUNDLE_SUBCLASSES), 'bundle_type collision: %s' % (
BUNDLE_TYPE_MAP,
)
def get_bundle_subclass(bundle_type):
return BUNDLE_TYPE_MAP[bundle_type]
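# e.g. get_bundle_subclass(RunBundle.BUNDLE_TYPE) is RunBundle; an unknown bundle_type raises KeyError.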
|
from aes.transformations import *
from aes.key_expansion import *
TESTKEY = bytearray(range(16))
def test_sboxes():
"""
>>> test_sboxes()
63 7c 77 7b f2 6b 6f c5 30 01 67 2b fe d7 ab 76
ca 82 c9 7d fa 59 47 f0 ad d4 a2 af 9c a4 72 c0
b7 fd 93 26 36 3f f7 cc 34 a5 e5 f1 71 d8 31 15
04 c7 23 c3 18 96 05 9a 07 12 80 e2 eb 27 b2 75
09 83 2c 1a 1b 6e 5a a0 52 3b d6 b3 29 e3 2f 84
53 d1 00 ed 20 fc b1 5b 6a cb be 39 4a 4c 58 cf
d0 ef aa fb 43 4d 33 85 45 f9 02 7f 50 3c 9f a8
51 a3 40 8f 92 9d 38 f5 bc b6 da 21 10 ff f3 d2
cd 0c 13 ec 5f 97 44 17 c4 a7 7e 3d 64 5d 19 73
60 81 4f dc 22 2a 90 88 46 ee b8 14 de 5e 0b db
e0 32 3a 0a 49 06 24 5c c2 d3 ac 62 91 95 e4 79
e7 c8 37 6d 8d d5 4e a9 6c 56 f4 ea 65 7a ae 08
ba 78 25 2e 1c a6 b4 c6 e8 dd 74 1f 4b bd 8b 8a
70 3e b5 66 48 03 f6 0e 61 35 57 b9 86 c1 1d 9e
e1 f8 98 11 69 d9 8e 94 9b 1e 87 e9 ce 55 28 df
8c a1 89 0d bf e6 42 68 41 99 2d 0f b0 54 bb 16
<BLANKLINE>
52 09 6a d5 30 36 a5 38 bf 40 a3 9e 81 f3 d7 fb
7c e3 39 82 9b 2f ff 87 34 8e 43 44 c4 de e9 cb
54 7b 94 32 a6 c2 23 3d ee 4c 95 0b 42 fa c3 4e
08 2e a1 66 28 d9 24 b2 76 5b a2 49 6d 8b d1 25
72 f8 f6 64 86 68 98 16 d4 a4 5c cc 5d 65 b6 92
6c 70 48 50 fd ed b9 da 5e 15 46 57 a7 8d 9d 84
90 d8 ab 00 8c bc d3 0a f7 e4 58 05 b8 b3 45 06
d0 2c 1e 8f ca 3f 0f 02 c1 af bd 03 01 13 8a 6b
3a 91 11 41 4f 67 dc ea 97 f2 cf ce f0 b4 e6 73
96 ac 74 22 e7 ad 35 85 e2 f9 37 e8 1c 75 df 6e
47 f1 1a 71 1d 29 c5 89 6f b7 62 0e aa 18 be 1b
fc 56 3e 4b c6 d2 79 20 9a db c0 fe 78 cd 5a f4
1f dd a8 33 88 07 c7 31 b1 12 10 59 27 80 ec 5f
60 51 7f a9 19 b5 4a 0d 2d e5 7a 9f 93 c9 9c ef
a0 e0 3b 4d ae 2a f5 b0 c8 eb bb 3c 83 53 99 61
17 2b 04 7e ba 77 d6 26 e1 69 14 63 55 21 0c 7d
"""
for a in range(16):
row = ""
for b in range(16):
x = a*16+b
row += "{:02x} ".format(sub_byte(x))
print(row.strip())
print()
for a in range(16):
row = ""
for b in range(16):
x = a*16+b
row += "{:02x} ".format(inv_sub_byte(x))
print(row.strip())
def test_key_expansion():
"""
>>> test_key_expansion()
00: 000102030405060708090a0b0c0d0e0f
01: d6aa74fdd2af72fadaa678f1d6ab76fe
02: b692cf0b643dbdf1be9bc5006830b3fe
03: b6ff744ed2c2c9bf6c590cbf0469bf41
04: 47f7f7bc95353e03f96c32bcfd058dfd
05: 3caaa3e8a99f9deb50f3af57adf622aa
06: 5e390f7df7a69296a7553dc10aa31f6b
07: 14f9701ae35fe28c440adf4d4ea9c026
08: 47438735a41c65b9e016baf4aebf7ad2
09: 549932d1f08557681093ed9cbe2c974e
10: 13111d7fe3944a17f307a78b4d2b30c5
"""
ekey = key_expansion(TESTKEY)
for i in range(len(ekey)):
print("{:02d}: {}".format(i, ekey[i].hex()))
if __name__ == "__main__":
import doctest
import aes.transformations as xforms
import aes.finite_field as ff
import tests
doctest.testmod(xforms)
doctest.testmod(ff)
doctest.testmod(tests) # yup, we can test ourself...
|
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
from video_prediction import ops, flow_ops
from video_prediction.models import VideoPredictionModel
from video_prediction.models import pix2pix_model, mocogan_model, spectral_norm_model
from video_prediction.ops import lrelu, dense, pad2d, conv2d, conv_pool2d, flatten, tile_concat, pool2d
from video_prediction.rnn_ops import BasicConv2DLSTMCell, Conv2DGRUCell
from video_prediction.utils import tf_utils
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
def create_legacy_encoder(inputs,
nz=8,
nef=64,
norm_layer='instance',
include_top=True):
norm_layer = ops.get_norm_layer(norm_layer)
with tf.variable_scope('h1'):
h1 = conv_pool2d(inputs, nef, kernel_size=5, strides=2)
h1 = norm_layer(h1)
h1 = tf.nn.relu(h1)
with tf.variable_scope('h2'):
h2 = conv_pool2d(h1, nef * 2, kernel_size=5, strides=2)
h2 = norm_layer(h2)
h2 = tf.nn.relu(h2)
with tf.variable_scope('h3'):
h3 = conv_pool2d(h2, nef * 4, kernel_size=5, strides=2)
h3 = norm_layer(h3)
h3 = tf.nn.relu(h3)
h3_flatten = flatten(h3)
if include_top:
with tf.variable_scope('z_mu'):
z_mu = dense(h3_flatten, nz)
with tf.variable_scope('z_log_sigma_sq'):
z_log_sigma_sq = dense(h3_flatten, nz)
z_log_sigma_sq = tf.clip_by_value(z_log_sigma_sq, -10, 10)
outputs = {'enc_zs_mu': z_mu, 'enc_zs_log_sigma_sq': z_log_sigma_sq}
else:
outputs = h3_flatten
return outputs
def create_n_layer_encoder(inputs,
nz=8,
nef=64,
n_layers=3,
norm_layer='instance',
include_top=True):
norm_layer = ops.get_norm_layer(norm_layer)
layers = []
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
with tf.variable_scope("layer_1"):
convolved = conv2d(tf.pad(inputs, paddings), nef, kernel_size=4, strides=2, padding='VALID')
rectified = lrelu(convolved, 0.2)
layers.append(rectified)
for i in range(1, n_layers):
with tf.variable_scope("layer_%d" % (len(layers) + 1)):
out_channels = nef * min(2**i, 4)
convolved = conv2d(tf.pad(layers[-1], paddings), out_channels, kernel_size=4, strides=2, padding='VALID')
normalized = norm_layer(convolved)
rectified = lrelu(normalized, 0.2)
layers.append(rectified)
pooled = pool2d(rectified, rectified.shape[1:3].as_list(), padding='VALID', pool_mode='avg')
squeezed = tf.squeeze(pooled, [1, 2])
if include_top:
with tf.variable_scope('z_mu'):
z_mu = dense(squeezed, nz)
with tf.variable_scope('z_log_sigma_sq'):
z_log_sigma_sq = dense(squeezed, nz)
z_log_sigma_sq = tf.clip_by_value(z_log_sigma_sq, -10, 10)
outputs = {'enc_zs_mu': z_mu, 'enc_zs_log_sigma_sq': z_log_sigma_sq}
else:
outputs = squeezed
return outputs
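# Shape note (descriptive only): both encoders take [batch, height, width, channels]
# inputs; with include_top=True they return {'enc_zs_mu': [batch, nz],
# 'enc_zs_log_sigma_sq': [batch, nz]}, otherwise a per-example feature vector.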
def create_encoder(inputs, e_net='legacy', use_e_rnn=False, rnn='lstm', **kwargs):
assert inputs.shape.ndims == 5
batch_shape = inputs.shape[:-3].as_list()
inputs = flatten(inputs, 0, len(batch_shape) - 1)
unflatten = lambda x: tf.reshape(x, batch_shape + x.shape.as_list()[1:])
if use_e_rnn:
if e_net == 'legacy':
kwargs.pop('n_layers', None) # unused
h = create_legacy_encoder(inputs, include_top=False, **kwargs)
with tf.variable_scope('h4'):
h = dense(h, kwargs['nef'] * 4)
elif e_net == 'n_layer':
h = create_n_layer_encoder(inputs, include_top=False, **kwargs)
with tf.variable_scope('layer_%d' % (kwargs['n_layers'] + 1)):
h = dense(h, kwargs['nef'] * 4)
else:
raise ValueError('Invalid encoder net %s' % e_net)
if rnn == 'lstm':
RNNCell = tf.contrib.rnn.BasicLSTMCell
elif rnn == 'gru':
RNNCell = tf.contrib.rnn.GRUCell
else:
raise NotImplementedError
h = nest.map_structure(unflatten, h)
for i in range(2):
with tf.variable_scope('%s_h%d' % (rnn, i)):
rnn_cell = RNNCell(kwargs['nef'] * 4)
h, _ = tf.nn.dynamic_rnn(rnn_cell, h, dtype=tf.float32, time_major=True)
h = flatten(h, 0, len(batch_shape) - 1)
with tf.variable_scope('z_mu'):
z_mu = dense(h, kwargs['nz'])
with tf.variable_scope('z_log_sigma_sq'):
z_log_sigma_sq = dense(h, kwargs['nz'])
z_log_sigma_sq = tf.clip_by_value(z_log_sigma_sq, -10, 10)
outputs = {'enc_zs_mu': z_mu, 'enc_zs_log_sigma_sq': z_log_sigma_sq}
else:
if e_net == 'legacy':
kwargs.pop('n_layers', None) # unused
outputs = create_legacy_encoder(inputs, include_top=True, **kwargs)
elif e_net == 'n_layer':
outputs = create_n_layer_encoder(inputs, include_top=True, **kwargs)
else:
raise ValueError('Invalid encoder net %s' % e_net)
outputs = nest.map_structure(unflatten, outputs)
return outputs
def encoder_fn(inputs, hparams=None):
images = inputs['images']
image_pairs = tf.concat([images[:hparams.sequence_length - 1],
images[1:hparams.sequence_length]], axis=-1)
if 'actions' in inputs:
image_pairs = tile_concat([image_pairs,
tf.expand_dims(tf.expand_dims(inputs['actions'], axis=-2), axis=-2)], axis=-1)
outputs = create_encoder(image_pairs,
e_net=hparams.e_net,
use_e_rnn=hparams.use_e_rnn,
rnn=hparams.rnn,
nz=hparams.nz,
nef=hparams.nef,
n_layers=hparams.n_layers,
norm_layer=hparams.norm_layer)
return outputs
def discriminator_fn(targets, inputs=None, hparams=None):
outputs = {}
if hparams.gan_weight or hparams.vae_gan_weight:
_, pix2pix_outputs = pix2pix_model.discriminator_fn(targets, inputs=inputs, hparams=hparams)
outputs.update(pix2pix_outputs)
if hparams.image_gan_weight or hparams.image_vae_gan_weight or \
hparams.video_gan_weight or hparams.video_vae_gan_weight or \
hparams.acvideo_gan_weight or hparams.acvideo_vae_gan_weight:
_, mocogan_outputs = mocogan_model.discriminator_fn(targets, inputs=inputs, hparams=hparams)
outputs.update(mocogan_outputs)
if hparams.image_sn_gan_weight or hparams.image_sn_vae_gan_weight or \
hparams.video_sn_gan_weight or hparams.video_sn_vae_gan_weight:
_, spectral_norm_outputs = spectral_norm_model.discriminator_fn(targets, inputs=inputs, hparams=hparams)
outputs.update(spectral_norm_outputs)
return None, outputs
class DNACell(tf.nn.rnn_cell.RNNCell):
def __init__(self, inputs, hparams, reuse=None):
super(DNACell, self).__init__(_reuse=reuse)
self.num_ensembles = hparams.num_ensembles
self.inputs = inputs
self.hparams = hparams
if self.hparams.where_add not in ('input', 'all', 'middle'):
raise ValueError('Invalid where_add %s' % self.hparams.where_add)
batch_size = inputs['images'].shape[1].value
assert batch_size % self.num_ensembles == 0
image_shape = inputs['images'].shape.as_list()[2:]
height, width, _ = image_shape
scale_size = max(height, width)
if scale_size == 256:
self.encoder_layer_specs = [
(self.hparams.ngf, False),
(self.hparams.ngf * 2, False),
(self.hparams.ngf * 4, True),
(self.hparams.ngf * 8, True),
(self.hparams.ngf * 8, True),
]
self.decoder_layer_specs = [
(self.hparams.ngf * 8, True),
(self.hparams.ngf * 4, True),
(self.hparams.ngf * 2, False),
(self.hparams.ngf, False),
(self.hparams.ngf, False),
]
elif scale_size == 64:
self.encoder_layer_specs = [
(self.hparams.ngf, True),
(self.hparams.ngf * 2, True),
(self.hparams.ngf * 4, True),
]
self.decoder_layer_specs = [
(self.hparams.ngf * 2, True),
(self.hparams.ngf, True),
(self.hparams.ngf, False),
]
elif scale_size == 32:
self.encoder_layer_specs = [
(self.hparams.ngf, True),
(self.hparams.ngf * 2, True),
]
self.decoder_layer_specs = [
(self.hparams.ngf, True),
(self.hparams.ngf, False),
]
else:
raise NotImplementedError
# output_size
gen_input_shape = list(image_shape)
if 'actions' in inputs:
gen_input_shape[-1] += inputs['actions'].shape[-1].value
num_masks = self.hparams.last_frames * self.hparams.num_transformed_images + \
int(bool(self.hparams.prev_image_background)) + \
int(bool(self.hparams.first_image_background and not self.hparams.context_images_background)) + \
(self.hparams.context_frames if self.hparams.context_images_background else 0) + \
int(bool(self.hparams.generate_scratch_image))
output_size = {
'gen_images': tf.TensorShape(image_shape),
'gen_inputs': tf.TensorShape(gen_input_shape),
'transformed_images': tf.TensorShape(image_shape + [num_masks]),
'masks': tf.TensorShape([height, width, 1, num_masks]),
}
if 'pix_distribs' in inputs:
num_motions = inputs['pix_distribs'].shape[-1].value
output_size['gen_pix_distribs'] = tf.TensorShape([height, width, num_motions])
output_size['transformed_pix_distribs'] = tf.TensorShape([height, width, num_motions, num_masks])
if 'states' in inputs:
output_size['gen_states'] = inputs['states'].shape[2:]
if self.hparams.transformation == 'flow':
output_size['gen_flows'] = tf.TensorShape([height, width, 2, self.hparams.last_frames * self.hparams.num_transformed_images])
output_size['gen_flows_rgb'] = tf.TensorShape([height, width, 3, self.hparams.last_frames * self.hparams.num_transformed_images])
self._output_size = output_size
# state_size
conv_rnn_state_sizes = []
conv_rnn_height, conv_rnn_width = height, width
for out_channels, use_conv_rnn in self.encoder_layer_specs:
conv_rnn_height //= 2
conv_rnn_width //= 2
if use_conv_rnn:
conv_rnn_state_sizes.append(tf.TensorShape([conv_rnn_height, conv_rnn_width, out_channels]))
for out_channels, use_conv_rnn in self.decoder_layer_specs:
conv_rnn_height *= 2
conv_rnn_width *= 2
if use_conv_rnn:
conv_rnn_state_sizes.append(tf.TensorShape([conv_rnn_height, conv_rnn_width, out_channels]))
if self.hparams.conv_rnn == 'lstm':
conv_rnn_state_sizes = [tf.nn.rnn_cell.LSTMStateTuple(conv_rnn_state_size, conv_rnn_state_size)
for conv_rnn_state_size in conv_rnn_state_sizes]
state_size = {'time': tf.TensorShape([]),
'gen_image': tf.TensorShape(image_shape),
'last_images': [tf.TensorShape(image_shape)] * self.hparams.last_frames,
'conv_rnn_states': conv_rnn_state_sizes}
if 'zs' in inputs and self.hparams.use_rnn_z:
rnn_z_state_size = tf.TensorShape([self.hparams.nz])
if self.hparams.rnn == 'lstm':
rnn_z_state_size = tf.nn.rnn_cell.LSTMStateTuple(rnn_z_state_size, rnn_z_state_size)
state_size['rnn_z_state'] = rnn_z_state_size
if 'pix_distribs' in inputs:
state_size['gen_pix_distrib'] = tf.TensorShape([height, width, num_motions])
state_size['last_pix_distribs'] = [tf.TensorShape([height, width, num_motions])] * self.hparams.last_frames
if 'states' in inputs:
state_size['gen_state'] = inputs['states'].shape[2:]
self._state_size = state_size
ground_truth_sampling_shape = [self.hparams.sequence_length - 1 - self.hparams.context_frames, batch_size]
if self.hparams.schedule_sampling == 'none':
ground_truth_sampling = tf.constant(False, dtype=tf.bool, shape=ground_truth_sampling_shape)
elif self.hparams.schedule_sampling in ('inverse_sigmoid', 'linear'):
if self.hparams.schedule_sampling == 'inverse_sigmoid':
k = self.hparams.schedule_sampling_k
start_step = self.hparams.schedule_sampling_steps[0]
iter_num = tf.to_float(tf.train.get_or_create_global_step())
prob = (k / (k + tf.exp((iter_num - start_step) / k)))
prob = tf.cond(tf.less(iter_num, start_step), lambda: 1.0, lambda: prob)
elif self.hparams.schedule_sampling == 'linear':
start_step, end_step = self.hparams.schedule_sampling_steps
step = tf.clip_by_value(tf.train.get_or_create_global_step(), start_step, end_step)
prob = 1.0 - tf.to_float(step - start_step) / tf.to_float(end_step - start_step)
log_probs = tf.log([1 - prob, prob])
ground_truth_sampling = tf.multinomial([log_probs] * batch_size, ground_truth_sampling_shape[0])
ground_truth_sampling = tf.cast(tf.transpose(ground_truth_sampling, [1, 0]), dtype=tf.bool)
# Ensure that eventually, the model is deterministically
# autoregressive (as opposed to autoregressive with very high probability).
ground_truth_sampling = tf.cond(tf.less(prob, 0.001),
lambda: tf.constant(False, dtype=tf.bool, shape=ground_truth_sampling_shape),
lambda: ground_truth_sampling)
else:
raise NotImplementedError
ground_truth_context = tf.constant(True, dtype=tf.bool, shape=[self.hparams.context_frames, batch_size])
self.ground_truth = tf.concat([ground_truth_context, ground_truth_sampling], axis=0)
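        # Worked example of the inverse-sigmoid schedule above (a sketch, not used at
        # runtime): with the default schedule_sampling_k = 900 and start_step = 0,
        #   prob = k / (k + exp(step / k))
        #   step = 0      -> prob ~= 900 / 901            ~= 0.999  (mostly ground truth)
        #   step = 5000   -> prob ~= 900 / (900 + 259)    ~= 0.78
        #   step = 10000  -> prob ~= 900 / (900 + 66900)  ~= 0.013  (mostly autoregressive)
        # Once prob drops below 0.001, the tf.cond above forces fully autoregressive sampling.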
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def zero_state(self, batch_size, dtype):
init_state = super(DNACell, self).zero_state(batch_size, dtype)
init_state['last_images'] = [self.inputs['images'][0]] * self.hparams.last_frames
if 'pix_distribs' in self.inputs:
init_state['last_pix_distribs'] = [self.inputs['pix_distribs'][0]] * self.hparams.last_frames
return init_state
def _rnn_func(self, inputs, state, num_units):
if self.hparams.rnn == 'lstm':
RNNCell = tf.contrib.rnn.BasicLSTMCell
elif self.hparams.rnn == 'gru':
RNNCell = tf.contrib.rnn.GRUCell
else:
raise NotImplementedError
rnn_cell = RNNCell(num_units, reuse=tf.get_variable_scope().reuse)
return rnn_cell(inputs, state)
def _conv_rnn_func(self, inputs, state, filters):
inputs_shape = inputs.get_shape().as_list()
input_shape = inputs_shape[1:]
if self.hparams.norm_layer == 'none':
normalizer_fn = None
else:
normalizer_fn = ops.get_norm_layer(self.hparams.norm_layer)
if self.hparams.conv_rnn == 'lstm':
Conv2DRNNCell = BasicConv2DLSTMCell
elif self.hparams.conv_rnn == 'gru':
Conv2DRNNCell = Conv2DGRUCell
else:
raise NotImplementedError
if self.hparams.ablation_conv_rnn_norm:
conv_rnn_cell = Conv2DRNNCell(input_shape, filters, kernel_size=(5, 5),
reuse=tf.get_variable_scope().reuse)
h, state = conv_rnn_cell(inputs, state)
outputs = (normalizer_fn(h), state)
else:
conv_rnn_cell = Conv2DRNNCell(input_shape, filters, kernel_size=(5, 5),
normalizer_fn=normalizer_fn,
separate_norms=self.hparams.norm_layer == 'layer',
reuse=tf.get_variable_scope().reuse)
outputs = conv_rnn_cell(inputs, state)
return outputs
def call(self, inputs, states):
norm_layer = ops.get_norm_layer(self.hparams.norm_layer)
downsample_layer = ops.get_downsample_layer(self.hparams.downsample_layer)
upsample_layer = ops.get_upsample_layer(self.hparams.upsample_layer)
image_shape = inputs['images'].get_shape().as_list()
batch_size, height, width, color_channels = image_shape
conv_rnn_states = states['conv_rnn_states']
time = states['time']
with tf.control_dependencies([tf.assert_equal(time[1:], time[0])]):
t = tf.to_int32(tf.identity(time[0]))
image = tf.where(self.ground_truth[t], inputs['images'], states['gen_image']) # schedule sampling (if any)
last_images = states['last_images'][1:] + [image]
if 'pix_distribs' in inputs:
pix_distrib = tf.where(self.ground_truth[t], inputs['pix_distribs'], states['gen_pix_distrib'])
last_pix_distribs = states['last_pix_distribs'][1:] + [pix_distrib]
if 'states' in inputs:
state = tf.where(self.ground_truth[t], inputs['states'], states['gen_state'])
state_action = []
state_action_z = []
if 'actions' in inputs:
state_action.append(inputs['actions'])
state_action_z.append(inputs['actions'])
if 'states' in inputs:
state_action.append(state)
# don't backpropagate the convnet through the state dynamics
state_action_z.append(tf.stop_gradient(state))
if 'zs' in inputs:
if self.hparams.use_rnn_z:
with tf.variable_scope('%s_z' % self.hparams.rnn):
rnn_z, rnn_z_state = self._rnn_func(inputs['zs'], states['rnn_z_state'], self.hparams.nz)
state_action_z.append(rnn_z)
else:
state_action_z.append(inputs['zs'])
def concat(tensors, axis):
if len(tensors) == 0:
return tf.zeros([batch_size, 0])
elif len(tensors) == 1:
return tensors[0]
else:
return tf.concat(tensors, axis=axis)
state_action = concat(state_action, axis=-1)
state_action_z = concat(state_action_z, axis=-1)
if 'actions' in inputs:
gen_input = tile_concat([image, inputs['actions'][:, None, None, :]], axis=-1)
else:
gen_input = image
layers = []
new_conv_rnn_states = []
for i, (out_channels, use_conv_rnn) in enumerate(self.encoder_layer_specs):
with tf.variable_scope('h%d' % i):
if i == 0:
h = tf.concat([image, self.inputs['images'][0]], axis=-1)
kernel_size = (5, 5)
else:
h = layers[-1][-1]
kernel_size = (3, 3)
if self.hparams.where_add == 'all' or (self.hparams.where_add == 'input' and i == 0):
h = tile_concat([h, state_action_z[:, None, None, :]], axis=-1)
h = downsample_layer(h, out_channels, kernel_size=kernel_size, strides=(2, 2))
h = norm_layer(h)
h = tf.nn.relu(h)
if use_conv_rnn:
conv_rnn_state = conv_rnn_states[len(new_conv_rnn_states)]
with tf.variable_scope('%s_h%d' % (self.hparams.conv_rnn, i)):
if self.hparams.where_add == 'all':
conv_rnn_h = tile_concat([h, state_action_z[:, None, None, :]], axis=-1)
else:
conv_rnn_h = h
conv_rnn_h, conv_rnn_state = self._conv_rnn_func(conv_rnn_h, conv_rnn_state, out_channels)
new_conv_rnn_states.append(conv_rnn_state)
layers.append((h, conv_rnn_h) if use_conv_rnn else (h,))
num_encoder_layers = len(layers)
for i, (out_channels, use_conv_rnn) in enumerate(self.decoder_layer_specs):
with tf.variable_scope('h%d' % len(layers)):
if i == 0:
h = layers[-1][-1]
else:
h = tf.concat([layers[-1][-1], layers[num_encoder_layers - i - 1][-1]], axis=-1)
if self.hparams.where_add == 'all' or (self.hparams.where_add == 'middle' and i == 0):
h = tile_concat([h, state_action_z[:, None, None, :]], axis=-1)
h = upsample_layer(h, out_channels, kernel_size=(3, 3), strides=(2, 2))
h = norm_layer(h)
h = tf.nn.relu(h)
if use_conv_rnn:
conv_rnn_state = conv_rnn_states[len(new_conv_rnn_states)]
with tf.variable_scope('%s_h%d' % (self.hparams.conv_rnn, len(layers))):
if self.hparams.where_add == 'all':
conv_rnn_h = tile_concat([h, state_action_z[:, None, None, :]], axis=-1)
else:
conv_rnn_h = h
conv_rnn_h, conv_rnn_state = self._conv_rnn_func(conv_rnn_h, conv_rnn_state, out_channels)
new_conv_rnn_states.append(conv_rnn_state)
layers.append((h, conv_rnn_h) if use_conv_rnn else (h,))
assert len(new_conv_rnn_states) == len(conv_rnn_states)
if self.hparams.last_frames and self.hparams.num_transformed_images:
if self.hparams.transformation == 'flow':
ensemble_flows = []
splits = tf.split(layers[-1][-1], self.num_ensembles, axis=0)
for ens in range(self.num_ensembles):
with tf.variable_scope('h%d_flow_ens%d' % (len(layers), ens)):
h_flow = conv2d(splits[ens], self.hparams.ngf, kernel_size=(3, 3), strides=(1, 1))
h_flow = norm_layer(h_flow)
h_flow = tf.nn.relu(h_flow)
with tf.variable_scope('flows_ens{}'.format(ens)):
flows = conv2d(h_flow, 2 * self.hparams.last_frames * self.hparams.num_transformed_images, kernel_size=(3, 3), strides=(1, 1))
flows = tf.reshape(flows, [batch_size//self.num_ensembles, height, width, 2, self.hparams.last_frames * self.hparams.num_transformed_images])
ensemble_flows.append(flows)
else:
assert len(self.hparams.kernel_size) == 2
kernel_shape = list(self.hparams.kernel_size) + [self.hparams.last_frames * self.hparams.num_transformed_images]
if self.hparams.transformation == 'dna':
with tf.variable_scope('h%d_dna_kernel' % len(layers)):
h_dna_kernel = conv2d(layers[-1][-1], self.hparams.ngf, kernel_size=(3, 3), strides=(1, 1))
h_dna_kernel = norm_layer(h_dna_kernel)
h_dna_kernel = tf.nn.relu(h_dna_kernel)
# Using largest hidden state for predicting untied conv kernels.
with tf.variable_scope('dna_kernels'):
kernels = conv2d(h_dna_kernel, np.prod(kernel_shape), kernel_size=(3, 3), strides=(1, 1))
kernels = tf.reshape(kernels, [batch_size, height, width] + kernel_shape)
kernels = kernels + identity_kernel(self.hparams.kernel_size)[None, None, None, :, :, None]
kernel_spatial_axes = [3, 4]
elif self.hparams.transformation == 'cdna':
with tf.variable_scope('cdna_kernels'):
smallest_layer = layers[num_encoder_layers - 1][-1]
kernels = dense(flatten(smallest_layer), np.prod(kernel_shape))
kernels = tf.reshape(kernels, [batch_size] + kernel_shape)
kernels = kernels + identity_kernel(self.hparams.kernel_size)[None, :, :, None]
kernel_spatial_axes = [1, 2]
else:
raise ValueError('Invalid transformation %s' % self.hparams.transformation)
if self.hparams.transformation != 'flow':
with tf.name_scope('kernel_normalization'):
kernels = tf.nn.relu(kernels - RELU_SHIFT) + RELU_SHIFT
kernels /= tf.reduce_sum(kernels, axis=kernel_spatial_axes, keepdims=True)
if self.hparams.generate_scratch_image:
with tf.variable_scope('h%d_scratch' % len(layers)):
h_scratch = conv2d(layers[-1][-1], self.hparams.ngf, kernel_size=(3, 3), strides=(1, 1))
h_scratch = norm_layer(h_scratch)
h_scratch = tf.nn.relu(h_scratch)
# Using largest hidden state for predicting a new image layer.
# This allows the network to also generate one image from scratch,
# which is useful when regions of the image become unoccluded.
with tf.variable_scope('scratch_image'):
scratch_image = conv2d(h_scratch, color_channels, kernel_size=(3, 3), strides=(1, 1))
scratch_image = tf.nn.sigmoid(scratch_image)
with tf.name_scope('transformed_images'):
#transformed_images = ['bla']
ensemble_transformed_images = []
for ens in range(self.num_ensembles):
ensemble_transformed_images.append([])
if self.hparams.last_frames and self.hparams.num_transformed_images:
if self.hparams.transformation == 'flow':
split_context = [tf.split(img, self.num_ensembles, axis=0) for img in last_images]
for ens in range(self.num_ensembles):
ensemble_context = [s[ens] for s in split_context]
ensemble_transformed_images[ens].extend(apply_flows(ensemble_context, ensemble_flows[ens]))
#transformed_images.extend(apply_flows(last_images, flows))
                else:
                    # Only the 'flow' transformation has been adapted to the per-ensemble
                    # batch layout here; kernel-based transforms would need the same split.
                    raise NotImplementedError('only the flow transformation is supported in the ensemble model')
if self.hparams.prev_image_background:
split_one = tf.split(image, self.num_ensembles, axis=0)
if self.hparams.first_image_background and not self.hparams.context_images_background:
split_two = tf.split(self.inputs['images'][0], self.num_ensembles, axis=0)
if self.hparams.context_images_background:
                split_three = [tf.split(img, self.num_ensembles, axis=0)
                               for img in tf.unstack(self.inputs['images'][:self.hparams.context_frames])]
if self.hparams.generate_scratch_image:
split_four = tf.split(scratch_image, self.num_ensembles, axis=0)
for ens in range(self.num_ensembles):
if self.hparams.prev_image_background:
#ensemble_transformed_images[ens].append(image)
ensemble_transformed_images[ens].append(split_one[ens])
if self.hparams.first_image_background and not self.hparams.context_images_background:
#ensemble_transformed_images[ens].append(self.inputs['images'][0])
ensemble_transformed_images[ens].append(split_two[ens])
if self.hparams.context_images_background:
#ensemble_transformed_images[ens].extend(tf.unstack(self.inputs['images'][:self.hparams.context_frames]))
                    ensemble_transformed_images[ens].extend([s[ens] for s in split_three])
if self.hparams.generate_scratch_image:
#ensemble_transformed_images[ens].append(scratch_image)
ensemble_transformed_images[ens].append(split_four[ens])
if 'pix_distribs' in inputs:
with tf.name_scope('transformed_pix_distribs'):
ens_transformed_pix_distribs = [[] for _ in range(self.num_ensembles)]
ens_last_pix_distribs = [tf.split(p, self.num_ensembles) for p in last_pix_distribs]
ens_pix_distrib = tf.split(pix_distrib, self.num_ensembles)
ens_context = [tf.split(c, self.num_ensembles) for c in
tf.unstack(self.inputs['pix_distribs'][:self.hparams.context_frames])]
for ens in range(self.num_ensembles):
if self.hparams.last_frames and self.hparams.num_transformed_images:
if self.hparams.transformation == 'flow':
ens_distribs = [p[ens] for p in ens_last_pix_distribs]
ens_transformed_pix_distribs[ens].extend(apply_flows(ens_distribs, ensemble_flows[ens]))
else:
raise NotImplementedError
if self.hparams.prev_image_background:
ens_transformed_pix_distribs[ens].append(ens_pix_distrib[ens])
if self.hparams.first_image_background and not self.hparams.context_images_background:
ens_transformed_pix_distribs[ens].append(ens_context[0][ens])
if self.hparams.context_images_background:
ens_transformed_pix_distribs[ens].extend([c[ens] for c in ens_context])
if self.hparams.generate_scratch_image:
                        ens_transformed_pix_distribs[ens].append(ens_pix_distrib[ens])
with tf.name_scope('masks'):
if len(ensemble_transformed_images[0]) > 1:
ensemble_masks = []
splits = tf.split(layers[-1][-1], self.num_ensembles, axis=0)
for ens in range(self.num_ensembles):
with tf.variable_scope('h%d_masks_ens%d' % (len(layers), ens)):
h_masks = conv2d(splits[ens], self.hparams.ngf, kernel_size=(3, 3), strides=(1, 1))
h_masks = norm_layer(h_masks)
h_masks = tf.nn.relu(h_masks)
with tf.variable_scope('masks_ens{}'.format(ens)):
if self.hparams.dependent_mask:
h_masks = tf.concat([h_masks] + ensemble_transformed_images[ens], axis=-1)
masks = conv2d(h_masks, len(ensemble_transformed_images[ens]), kernel_size=(3, 3), strides=(1, 1))
masks = tf.nn.softmax(masks)
masks = tf.split(masks, len(ensemble_transformed_images[ens]), axis=-1)
ensemble_masks.append(masks)
            elif len(ensemble_transformed_images[0]) == 1:
                ensemble_masks = [[tf.ones([batch_size // self.num_ensembles, height, width, 1])]
                                  for _ in range(self.num_ensembles)]
else:
raise ValueError("Either one of the following should be true: "
"last_frames and num_transformed_images, first_image_background, "
"prev_image_background, generate_scratch_image")
with tf.name_scope('gen_images'):
#assert len(transformed_images) == len(masks)
ensemble_gen_image = []
for ens in range(self.num_ensembles):
assert len(ensemble_transformed_images[ens]) == len(ensemble_masks[ens])
ensemble_gen_image.append(tf.add_n([transformed_image * mask
for transformed_image, mask in zip(ensemble_transformed_images[ens], ensemble_masks[ens])]))
masks = [tf.concat([ensemble_masks[j][i] for j in range(self.num_ensembles)], axis=0)
for i in range(len(ensemble_masks[0]))]
if 'pix_distribs' in inputs:
transformed_pix_distribs = [tf.concat([p[t] for p in ens_transformed_pix_distribs], 0)
for t in range(len(ens_transformed_pix_distribs[0]))]
with tf.name_scope('gen_pix_distribs'):
assert len(transformed_pix_distribs) == len(masks)
gen_pix_distrib = tf.add_n([transformed_pix_distrib * mask
for transformed_pix_distrib, mask in zip(transformed_pix_distribs, masks)])
if self.hparams.renormalize_pixdistrib:
gen_pix_distrib /= tf.reduce_sum(gen_pix_distrib, axis=(1, 2), keepdims=True)
if 'states' in inputs:
            raise NotImplementedError('No states!!')
with tf.name_scope('gen_states'):
with tf.variable_scope('state_pred'):
gen_state = dense(state_action, inputs['states'].shape[-1].value)
gen_image = tf.concat(ensemble_gen_image, axis=0)
transformed_images = [tf.concat([ensemble_transformed_images[j][i] for j in range(self.num_ensembles)], axis=0)
for i in range(len(ensemble_transformed_images[0]))]
outputs = {'gen_images': gen_image,
'gen_inputs': gen_input,
'transformed_images': tf.stack(transformed_images, axis=-1),
'masks': tf.stack(masks, axis=-1)}
#for ens in (self.num_ensembles):
# outputs['transformed_images_{}'.format(ens)] = tf.stack(ensemble_transformed_images[ens], axis=-1)
# outputs['gen_image_{}'.format(ens)] = ensemble_gen_image[ens]
# outputs['masks_{}'.format(ens)] = ensemble_masks[ens]
if 'pix_distribs' in inputs:
outputs['gen_pix_distribs'] = gen_pix_distrib
outputs['transformed_pix_distribs'] = tf.stack(transformed_pix_distribs, axis=-1)
if 'states' in inputs:
outputs['gen_states'] = gen_state
if self.hparams.transformation == 'flow':
#outputs['gen_flows'] = flows
concat_flows_rgb = []
for ens in range(self.num_ensembles):
flows = ensemble_flows[ens]
flows_transposed = tf.transpose(flows, [0, 1, 2, 4, 3])
flows_rgb_transposed = tf_utils.flow_to_rgb(flows_transposed)
flows_rgb = tf.transpose(flows_rgb_transposed, [0, 1, 2, 4, 3])
concat_flows_rgb.append(flows_rgb)
outputs['gen_flows'] = tf.concat(ensemble_flows, axis=0)
outputs['gen_flows_rgb'] = tf.concat(concat_flows_rgb, axis=0)
new_states = {'time': time + 1,
'gen_image': gen_image,
'last_images': last_images,
'conv_rnn_states': new_conv_rnn_states}
if 'zs' in inputs and self.hparams.use_rnn_z:
new_states['rnn_z_state'] = rnn_z_state
if 'pix_distribs' in inputs:
new_states['gen_pix_distrib'] = gen_pix_distrib
new_states['last_pix_distribs'] = last_pix_distribs
if 'states' in inputs:
new_states['gen_state'] = gen_state
return outputs, new_states
def generator_fn(inputs, outputs_enc=None, hparams=None):
batch_size = inputs['images'].shape[1].value
inputs = {name: tf_utils.maybe_pad_or_slice(input, hparams.sequence_length - 1)
for name, input in inputs.items()}
if hparams.nz:
def sample_zs():
if outputs_enc is None:
zs = tf.random_normal([hparams.sequence_length - 1, batch_size, hparams.nz], 0, 1)
else:
enc_zs_mu = outputs_enc['enc_zs_mu']
enc_zs_log_sigma_sq = outputs_enc['enc_zs_log_sigma_sq']
eps = tf.random_normal([hparams.sequence_length - 1, batch_size, hparams.nz], 0, 1)
zs = enc_zs_mu + tf.sqrt(tf.exp(enc_zs_log_sigma_sq)) * eps
return zs
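        # sample_zs draws the latent codes either from the prior N(0, 1) or, when encoder
        # outputs are provided, from the approximate posterior via the reparameterization
        # trick: zs = mu + sqrt(exp(log_sigma_sq)) * eps with eps ~ N(0, 1).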
inputs['zs'] = sample_zs()
else:
if outputs_enc is not None:
raise ValueError('outputs_enc has to be None when nz is 0.')
cell = DNACell(inputs, hparams)
outputs, _ = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32,
swap_memory=False, time_major=True)
if hparams.nz:
inputs_samples = {name: flatten(tf.tile(input[:, None], [1, hparams.num_samples] + [1] * (input.shape.ndims - 1)), 1, 2)
for name, input in inputs.items() if name != 'zs'}
inputs_samples['zs'] = tf.concat([sample_zs() for _ in range(hparams.num_samples)], axis=1)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
cell_samples = DNACell(inputs_samples, hparams)
outputs_samples, _ = tf.nn.dynamic_rnn(cell_samples, inputs_samples, dtype=tf.float32,
swap_memory=False, time_major=True)
gen_images_samples = outputs_samples['gen_images']
gen_images_samples = tf.stack(tf.split(gen_images_samples, hparams.num_samples, axis=1), axis=-1)
gen_images_samples_avg = tf.reduce_mean(gen_images_samples, axis=-1)
outputs['gen_images_samples'] = gen_images_samples
outputs['gen_images_samples_avg'] = gen_images_samples_avg
# the RNN outputs generated images from time step 1 to sequence_length,
# but generator_fn should only return images past context_frames
outputs = {name: output[hparams.context_frames - 1:] for name, output in outputs.items()}
gen_images = outputs['gen_images']
outputs['ground_truth_sampling_mean'] = tf.reduce_mean(tf.to_float(cell.ground_truth[hparams.context_frames:]))
return gen_images, outputs
class EnsembleSAVPVideoPredictionModel(VideoPredictionModel):
def __init__(self, *args, **kwargs):
super(EnsembleSAVPVideoPredictionModel, self).__init__(
generator_fn, discriminator_fn, encoder_fn, *args, **kwargs)
if self.hparams.e_net == 'none' or self.hparams.nz == 0:
self.encoder_fn = None
if self.hparams.d_net == 'none':
self.discriminator_fn = None
self.deterministic = not self.hparams.nz
def get_default_hparams_dict(self):
default_hparams = super(EnsembleSAVPVideoPredictionModel, self).get_default_hparams_dict()
hparams = dict(
l1_weight=1.0,
l2_weight=0.0,
d_net='legacy',
n_layers=3,
ndf=32,
norm_layer='instance',
d_downsample_layer='conv_pool2d',
d_conditional=True,
d_use_gt_inputs=True,
ngf=32,
downsample_layer='conv_pool2d',
upsample_layer='upsample_conv2d',
transformation='cdna',
kernel_size=(5, 5),
dilation_rate=(1, 1),
where_add='all',
rnn='lstm',
conv_rnn='lstm',
num_transformed_images=4,
last_frames=1,
prev_image_background=True,
first_image_background=True,
context_images_background=False,
generate_scratch_image=True,
dependent_mask=True,
schedule_sampling='inverse_sigmoid',
schedule_sampling_k=900.0,
schedule_sampling_steps=(0, 100000),
e_net='n_layer',
use_e_rnn=False,
nz=8,
num_samples=8,
nef=64,
use_rnn_z=True,
ablation_conv_rnn_norm=False,
renormalize_pixdistrib=True,
num_ensembles=4
)
return dict(itertools.chain(default_hparams.items(), hparams.items()))
def parse_hparams(self, hparams_dict, hparams):
hparams = super(EnsembleSAVPVideoPredictionModel, self).parse_hparams(hparams_dict, hparams)
if self.mode == 'test':
def override_hparams_maybe(name, value):
orig_value = hparams.values()[name]
if orig_value != value:
print('Overriding hparams from %s=%r to %r for mode=%s.' %
(name, orig_value, value, self.mode))
hparams.set_hparam(name, value)
override_hparams_maybe('d_net', 'none')
override_hparams_maybe('e_net', 'none')
override_hparams_maybe('schedule_sampling', 'none')
return hparams
@property
def num_ensembles(self):
return self.hparams.num_ensembles
def apply_dna_kernels(image, kernels, dilation_rate=(1, 1)):
"""
Args:
image: A 4-D tensor of shape
`[batch, in_height, in_width, in_channels]`.
        kernels: A 6-D tensor of shape
`[batch, in_height, in_width, kernel_size[0], kernel_size[1], num_transformed_images]`.
Returns:
A list of `num_transformed_images` 4-D tensors, each of shape
`[batch, in_height, in_width, in_channels]`.
"""
dilation_rate = list(dilation_rate) if isinstance(dilation_rate, (tuple, list)) else [dilation_rate] * 2
batch_size, height, width, color_channels = image.get_shape().as_list()
batch_size, height, width, kernel_height, kernel_width, num_transformed_images = kernels.get_shape().as_list()
kernel_size = [kernel_height, kernel_width]
# Flatten the spatial dimensions.
kernels_reshaped = tf.reshape(kernels, [batch_size, height, width,
kernel_size[0] * kernel_size[1], num_transformed_images])
image_padded = pad2d(image, kernel_size, rate=dilation_rate, padding='SAME', mode='SYMMETRIC')
# Combine channel and batch dimensions into the first dimension.
image_transposed = tf.transpose(image_padded, [3, 0, 1, 2])
image_reshaped = flatten(image_transposed, 0, 1)[..., None]
patches_reshaped = tf.extract_image_patches(image_reshaped, ksizes=[1] + kernel_size + [1],
strides=[1] * 4, rates=[1] + dilation_rate + [1], padding='VALID')
# Separate channel and batch dimensions, and move channel dimension.
patches_transposed = tf.reshape(patches_reshaped, [color_channels, batch_size, height, width, kernel_size[0] * kernel_size[1]])
patches = tf.transpose(patches_transposed, [1, 2, 3, 0, 4])
# Reduce along the spatial dimensions of the kernel.
outputs = tf.matmul(patches, kernels_reshaped)
outputs = tf.unstack(outputs, axis=-1)
return outputs
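# Shape walk-through for apply_dna_kernels (for reference only): the padded image is laid
# out as [channels * batch, H, W, 1]; extract_image_patches then yields
# [channels * batch, H, W, kh * kw], which is reshaped and transposed into patches of shape
# [batch, H, W, channels, kh * kw]. The matmul against kernels [batch, H, W, kh * kw, N]
# gives [batch, H, W, channels, N]: one spatially-varying ("untied") convolution per
# transformed image, unstacked along the last axis.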
def apply_cdna_kernels(image, kernels, dilation_rate=(1, 1)):
"""
Args:
image: A 4-D tensor of shape
`[batch, in_height, in_width, in_channels]`.
        kernels: A 4-D tensor of shape
`[batch, kernel_size[0], kernel_size[1], num_transformed_images]`.
Returns:
A list of `num_transformed_images` 4-D tensors, each of shape
`[batch, in_height, in_width, in_channels]`.
"""
batch_size, height, width, color_channels = image.get_shape().as_list()
batch_size, kernel_height, kernel_width, num_transformed_images = kernels.get_shape().as_list()
kernel_size = [kernel_height, kernel_width]
image_padded = pad2d(image, kernel_size, rate=dilation_rate, padding='SAME', mode='SYMMETRIC')
# Treat the color channel dimension as the batch dimension since the same
# transformation is applied to each color channel.
# Treat the batch dimension as the channel dimension so that
# depthwise_conv2d can apply a different transformation to each sample.
kernels = tf.transpose(kernels, [1, 2, 0, 3])
kernels = tf.reshape(kernels, [kernel_size[0], kernel_size[1], batch_size, num_transformed_images])
# Swap the batch and channel dimensions.
image_transposed = tf.transpose(image_padded, [3, 1, 2, 0])
# Transform image.
outputs = tf.nn.depthwise_conv2d(image_transposed, kernels, [1, 1, 1, 1], padding='VALID', rate=dilation_rate)
# Transpose the dimensions to where they belong.
outputs = tf.reshape(outputs, [color_channels, height, width, batch_size, num_transformed_images])
outputs = tf.transpose(outputs, [4, 3, 1, 2, 0])
outputs = tf.unstack(outputs, axis=0)
return outputs
def apply_kernels(image, kernels, dilation_rate=(1, 1)):
"""
Args:
image: A 4-D tensor of shape
`[batch, in_height, in_width, in_channels]`.
kernels: A 4-D or 6-D tensor of shape
`[batch, kernel_size[0], kernel_size[1], num_transformed_images]` or
`[batch, in_height, in_width, kernel_size[0], kernel_size[1], num_transformed_images]`.
Returns:
A list of `num_transformed_images` 4-D tensors, each of shape
`[batch, in_height, in_width, in_channels]`.
"""
if isinstance(image, list):
image_list = image
kernels_list = tf.split(kernels, len(image_list), axis=-1)
outputs = []
for image, kernels in zip(image_list, kernels_list):
            outputs.extend(apply_kernels(image, kernels, dilation_rate=dilation_rate))
else:
if len(kernels.get_shape()) == 4:
outputs = apply_cdna_kernels(image, kernels, dilation_rate=dilation_rate)
elif len(kernels.get_shape()) == 6:
outputs = apply_dna_kernels(image, kernels, dilation_rate=dilation_rate)
else:
raise ValueError
return outputs
def apply_flows(image, flows):
if isinstance(image, list):
image_list = image
flows_list = tf.split(flows, len(image_list), axis=-1)
outputs = []
for image, flows in zip(image_list, flows_list):
outputs.extend(apply_flows(image, flows))
else:
flows = tf.unstack(flows, axis=-1)
outputs = [flow_ops.image_warp(image, flow) for flow in flows]
return outputs
def identity_kernel(kernel_size):
kh, kw = kernel_size
kernel = np.zeros(kernel_size)
def center_slice(k):
if k % 2 == 0:
return slice(k // 2 - 1, k // 2 + 1)
else:
return slice(k // 2, k // 2 + 1)
kernel[center_slice(kh), center_slice(kw)] = 1.0
kernel /= np.sum(kernel)
return kernel
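# A minimal sanity check for identity_kernel (a sketch; it runs only when this module is
# executed directly and relies solely on NumPy, which is already imported above).
if __name__ == '__main__':
    k5 = identity_kernel((5, 5))
    assert k5[2, 2] == 1.0 and np.isclose(k5.sum(), 1.0)   # odd size: a single centered 1
    k4 = identity_kernel((4, 4))
    assert np.isclose(k4[1:3, 1:3], 0.25).all()            # even size: 2x2 center, each 0.25
    print('identity_kernel sanity checks passed')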
|
#
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
import sys, math
import fft_swig as fft
from fft_swig import window
try:
from gnuradio import filter
except ImportError:
    sys.stderr.write('fft.logpwrfft requires gr-filter.\n')
sys.exit(1)
class _logpwrfft_base(gr.hier_block2):
"""
Create a log10(abs(fft)) stream chain, with real or complex input.
"""
def __init__(self, sample_rate, fft_size, ref_scale, frame_rate, avg_alpha, average, win=None):
"""
        Create a log10(abs(fft)) stream chain.
        Provides access to setting the filter and sample rate.
Args:
sample_rate: Incoming stream sample rate
fft_size: Number of FFT bins
ref_scale: Sets 0 dB value input amplitude
frame_rate: Output frame rate
avg_alpha: FFT averaging (over time) constant [0.0-1.0]
average: Whether to average [True, False]
win: the window taps generation function
"""
gr.hier_block2.__init__(self, self._name,
gr.io_signature(1, 1, self._item_size), # Input signature
gr.io_signature(1, 1, gr.sizeof_float*fft_size)) # Output signature
self._sd = blocks.stream_to_vector_decimator(item_size=self._item_size,
sample_rate=sample_rate,
vec_rate=frame_rate,
vec_len=fft_size)
if win is None: win = window.blackmanharris
fft_window = win(fft_size)
fft = self._fft_block[0](fft_size, True, fft_window)
window_power = sum(map(lambda x: x*x, fft_window))
c2magsq = blocks.complex_to_mag_squared(fft_size)
self._avg = filter.single_pole_iir_filter_ff(1.0, fft_size)
self._log = blocks.nlog10_ff(10, fft_size,
-20*math.log10(fft_size) # Adjust for number of bins
-10*math.log10(float(window_power)/fft_size) # Adjust for windowing loss
-20*math.log10(float(ref_scale)/2)) # Adjust for reference scale
self.connect(self, self._sd, fft, c2magsq, self._avg, self._log, self)
self._average = average
self._avg_alpha = avg_alpha
self.set_avg_alpha(avg_alpha)
self.set_average(average)
def set_decimation(self, decim):
"""
Set the decimation on stream decimator.
Args:
decim: the new decimation
"""
self._sd.set_decimation(decim)
def set_vec_rate(self, vec_rate):
"""
Set the vector rate on stream decimator.
Args:
vec_rate: the new vector rate
"""
self._sd.set_vec_rate(vec_rate)
def set_sample_rate(self, sample_rate):
"""
Set the new sampling rate
Args:
sample_rate: the new rate
"""
self._sd.set_sample_rate(sample_rate)
def set_average(self, average):
"""
Set the averaging filter on/off.
Args:
average: true to set averaging on
"""
self._average = average
if self._average:
self._avg.set_taps(self._avg_alpha)
else:
self._avg.set_taps(1.0)
def set_avg_alpha(self, avg_alpha):
"""
Set the average alpha and set the taps if average was on.
Args:
avg_alpha: the new iir filter tap
"""
self._avg_alpha = avg_alpha
self.set_average(self._average)
def sample_rate(self):
"""
Return the current sample rate.
"""
return self._sd.sample_rate()
def decimation(self):
"""
Return the current decimation.
"""
return self._sd.decimation()
def frame_rate(self):
"""
Return the current frame rate.
"""
return self._sd.frame_rate()
def average(self):
"""
Return whether or not averaging is being performed.
"""
return self._average
def avg_alpha(self):
"""
Return averaging filter constant.
"""
return self._avg_alpha
class logpwrfft_f(_logpwrfft_base):
"""
Create an fft block chain, with real input.
"""
_name = "logpwrfft_f"
_item_size = gr.sizeof_float
_fft_block = (fft.fft_vfc, )
class logpwrfft_c(_logpwrfft_base):
"""
Create an fft block chain, with complex input.
"""
_name = "logpwrfft_c"
_item_size = gr.sizeof_gr_complex
_fft_block = (fft.fft_vcc, )
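# Rough sanity check of the dB calibration applied in _logpwrfft_base above
# (comments only; assumes a rectangular window where every tap is 1.0):
#   fft_size = 1024, ref_scale = 2
#     -20*log10(fft_size)                 ~= -60.2 dB  (FFT gain)
#     -10*log10(window_power / fft_size)   =   0.0 dB  (window power equals fft_size)
#     -20*log10(ref_scale / 2)             =   0.0 dB  (amplitude ref_scale maps to 0 dB)
# so an input of amplitude ref_scale is intended to come out near 0 dB, matching the
# ref_scale docstring.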
|
# -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore
from gui.list.specieslist import speciesList
from core.localisation import _
class speciesListDialog(QtGui.QDialog):
"""
Class to list the current world's species.
"""
_table = None
def __init__(self, parent, app):
"""
        Initialise the window, create the GUI and display the dialog.
"""
QtGui.QDialog.__init__(self, parent)
self._app = app
self._parent = parent
self._editedRow = None
self.initUI()
self.setWindowTitle(_('LIST_SPECIES_DIALOG_TITLE'))
self.setModal(True)
self.show()
def initUI(self):
"""
Creates the GUI.
The GUI is composed of a table listing the existing species, a form to
add a species and a button to close the window.
"""
layout = QtGui.QVBoxLayout(self)
self._table = speciesList(self, self._app)
self._table.entityDeleted.connect(self._table.setData)
self._table.cellDoubleClicked.connect(self.editRow)
closeButton = QtGui.QPushButton(_('CLOSE_BUTTON'))
closeButton.clicked.connect(self.close)
form = self.creationForm()
layout.addWidget(self._table)
layout.addLayout(form)
layout.addWidget(closeButton)
self.setLayout(layout)
def editRow(self, x, y):
"""
        Populate the species form with the values of a double-clicked row so that
        it can be edited.
"""
row = self._table.getRowValues(x)
self._editedRow = row['internalName']
self._populateForm(row)
def creationForm(self):
"""
Method which creates the form to add a species.
Returns a layout containing the form elements.
"""
form = QtGui.QGridLayout()
self._messageLabel = QtGui.QLabel()
self._messageLabel.setWordWrap(True)
nameLabel = QtGui.QLabel(_('SPECIES_NAME_LABEL'))
self._nameField = QtGui.QLineEdit()
self._nameField.textChanged.connect(self.updateCreateButton)
internalNameLabel = QtGui.QLabel(_('SPECIES_INTERNAL_NAME_LABEL'))
self._internalNameField = QtGui.QLineEdit()
self._internalNameField.textChanged.connect(self.updateCreateButton)
descriptionLabel = QtGui.QLabel(_('SPECIES_DESCRIPTION_LABEL'))
self._descriptionField = QtGui.QTextEdit()
self._descriptionField.textChanged.connect(self.updateCreateButton)
self._saveButton = QtGui.QPushButton(_('CREATE_BUTTON'))
self._saveButton.setEnabled(False)
self._saveButton.clicked.connect(self.createSpecies)
form.addWidget(self._messageLabel, 0, 0, 1, 2)
form.addWidget(internalNameLabel, 1, 0)
form.addWidget(self._internalNameField, 1, 1)
form.addWidget(nameLabel, 2, 0)
form.addWidget(self._nameField, 2, 1)
form.addWidget(descriptionLabel, 3, 0)
form.addWidget(self._descriptionField, 3, 1)
form.addWidget(self._saveButton, 4, 1)
return form
def updateCreateButton(self):
"""
Method called when the form's fields are edited. The "create" button is
enabled if the name field is not empty.
"""
self._saveButton.setEnabled(
str(self._nameField.text()).strip() != ""
and str(self._internalNameField.text()).strip() != ""
)
if self._editedRow is not None:
self._saveButton.setText(_('EDIT_BUTTON'))
def createSpecies(self):
"""
Method called when the "create" button is pressed. The filled data are
checked and if they are correct, the species is created.
"""
internalName = str(self._internalNameField.text()).strip()
name = str(self._nameField.text()).strip()
description = str(self._descriptionField.toPlainText()).strip()
if name is "" or internalName is "":
return False
if self._editedRow != internalName and self._app.hasSpeciesWithName(internalName):
self.displayMessage(_('ERROR_ALREADY_EXISTING_SPECIES'))
return False
elif self._editedRow is not None:
self._app.deleteSpecies(self._editedRow)
self._app.addSpecies(internalName, {
'name': name,
'description': description,
'internalName': internalName
})
self._cleanForm()
self._table.setData()
def _populateForm(self, row):
"""
Populate the form with the values in the dict row
"""
self._internalNameField.setText(row['internalName'])
self._nameField.setText(row['name'])
self._descriptionField.setText(row['description'])
def _cleanForm(self):
"""
Clean the form with empty values and reset the create button value
"""
self._internalNameField.setText('')
self._nameField.setText('')
self._descriptionField.setText('')
self._saveButton.setText(_('CREATE_BUTTON'))
self._editedRow = None
def displayMessage(self, message):
"""
Method to display a message in the window.
"""
self._messageLabel.setText(message)
self.adjustSize()
|
# Copyright 2020 Board of Trustees of the University of Illinois.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
import auth_middleware
from dotenv import load_dotenv
# Load .env file
load_dotenv()
app = flask.Flask(__name__)
################################################
# Call middleware here!
################################################
app.before_request(auth_middleware.authenticate)
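# Note: registering authenticate via before_request makes it run ahead of every request
# handled by this app, including the routes below; the explicit authenticate() call in
# /authorize is only there to get hold of the decoded id_info return value.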
@app.route('/')
def hello_world():
return "Hello world!"
@app.route('/authorize')
def authorize():
id_info = auth_middleware.authenticate()
group_list = None
try:
group_list = id_info["uiucedu_is_member_of"]
except Exception:
raise Exception('Token did not contain the group info')
auth_middleware.authorize(group_list)
auth_middleware.authorize(auth_middleware.rokwire_app_config_manager_group)
return "authorized"
if __name__ == '__main__':
app.run(port=5000, debug=True)
|
import click
@click.group()
@click.option("-p", "--path", default=None)
def iunctus(path):
pass
from iunctus.cli.add import add
from iunctus.cli.new import new
from iunctus.cli.show import show
iunctus.add_command(add)
iunctus.add_command(new)
iunctus.add_command(show)
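# Example invocation (assuming this module is exposed as an `iunctus` entry point, which
# is an assumption about packaging): the group option goes before the subcommand, e.g.
#   iunctus --path ./my-project show
# Note: `path` is currently accepted by the group but not forwarded to the subcommands.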
if __name__ == "__main__":
iunctus()
|
# encoding: utf-8
"""
Test suite for the docx.api module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from docx.api import Document
from docx.enum.text import WD_BREAK
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from docx.package import Package
from docx.parts.document import DocumentPart, InlineShapes
from docx.parts.numbering import NumberingPart
from docx.parts.styles import StylesPart
from docx.table import Table
from docx.text import Paragraph, Run
from .unitutil import (
instance_mock, class_mock, method_mock, property_mock, var_mock
)
class DescribeDocument(object):
def it_opens_a_docx_on_construction(self, init_fixture):
docx_, open_ = init_fixture
document = Document(docx_)
open_.assert_called_once_with(docx_)
assert isinstance(document, Document)
def it_can_open_a_docx_file(self, open_fixture):
docx_, Package_, package_, document_part_ = open_fixture
document_part, package = Document._open(docx_)
Package_.open.assert_called_once_with(docx_)
        assert document_part is document_part_
assert package is package_
def it_opens_default_template_if_no_file_provided(
self, Package_, default_docx_):
Document._open(None)
Package_.open.assert_called_once_with(default_docx_)
def it_should_raise_if_not_a_Word_file(self, Package_, package_, docx_):
package_.main_document.content_type = 'foobar'
with pytest.raises(ValueError):
Document._open(docx_)
def it_can_add_a_heading(self, add_heading_fixture):
document, add_paragraph_, p_, text, level, style = add_heading_fixture
p = document.add_heading(text, level)
add_paragraph_.assert_called_once_with(text, style)
assert p is p_
def it_should_raise_on_heading_level_out_of_range(self, document):
with pytest.raises(ValueError):
document.add_heading(level=-1)
with pytest.raises(ValueError):
document.add_heading(level=10)
def it_can_add_an_empty_paragraph(self, add_empty_paragraph_fixture):
document, document_part_, p_ = add_empty_paragraph_fixture
p = document.add_paragraph()
document_part_.add_paragraph.assert_called_once_with()
assert p is p_
def it_can_add_a_paragraph_of_text(self, add_text_paragraph_fixture):
document, text, p_, r_ = add_text_paragraph_fixture
p = document.add_paragraph(text)
p.add_run.assert_called_once_with()
r_.add_text.assert_called_once_with(text)
def it_can_add_a_styled_paragraph(self, add_styled_paragraph_fixture):
document, style, p_ = add_styled_paragraph_fixture
p = document.add_paragraph(style=style)
assert p.style == style
def it_can_add_a_page_break(self, add_page_break_fixture):
document, document_part_, p_, r_ = add_page_break_fixture
p = document.add_page_break()
document_part_.add_paragraph.assert_called_once_with()
p_.add_run.assert_called_once_with()
r_.add_break.assert_called_once_with(WD_BREAK.PAGE)
assert p is p_
def it_can_add_a_picture(self, add_picture_fixture):
(document, image_path, width, height, inline_shapes_, expected_width,
expected_height, picture_) = add_picture_fixture
picture = document.add_picture(image_path, width, height)
inline_shapes_.add_picture.assert_called_once_with(image_path)
assert picture.width == expected_width
assert picture.height == expected_height
assert picture is picture_
def it_can_add_a_table(self, add_table_fixture):
document, rows, cols, style, document_part_, expected_style, table_ = (
add_table_fixture
)
table = document.add_table(rows, cols, style)
document_part_.add_table.assert_called_once_with(rows, cols)
assert table.style == expected_style
assert table == table_
def it_provides_access_to_the_document_inline_shapes(self, document):
body = document.inline_shapes
assert body is document._document_part.inline_shapes
def it_provides_access_to_the_document_paragraphs(
self, paragraphs_fixture):
document, paragraphs_ = paragraphs_fixture
paragraphs = document.paragraphs
assert paragraphs is paragraphs_
def it_provides_access_to_the_document_tables(self, tables_fixture):
document, tables_ = tables_fixture
tables = document.tables
assert tables is tables_
def it_can_save_the_package(self, save_fixture):
document, package_, file_ = save_fixture
document.save(file_)
package_.save.assert_called_once_with(file_)
def it_provides_access_to_the_numbering_part(self, num_part_get_fixture):
document, document_part_, numbering_part_ = num_part_get_fixture
numbering_part = document.numbering_part
document_part_.part_related_by.assert_called_once_with(RT.NUMBERING)
assert numbering_part is numbering_part_
def it_creates_numbering_part_on_first_access_if_not_present(
self, num_part_create_fixture):
document, NumberingPart_, document_part_, numbering_part_ = (
num_part_create_fixture
)
numbering_part = document.numbering_part
NumberingPart_.new.assert_called_once_with()
document_part_.relate_to.assert_called_once_with(
numbering_part_, RT.NUMBERING
)
assert numbering_part is numbering_part_
def it_provides_access_to_the_styles_part(self, styles_part_get_fixture):
document, document_part_, styles_part_ = styles_part_get_fixture
styles_part = document.styles_part
document_part_.part_related_by.assert_called_once_with(RT.STYLES)
assert styles_part is styles_part_
def it_creates_styles_part_on_first_access_if_not_present(
self, styles_part_create_fixture):
document, StylesPart_, document_part_, styles_part_ = (
styles_part_create_fixture
)
styles_part = document.styles_part
StylesPart_.new.assert_called_once_with()
document_part_.relate_to.assert_called_once_with(
styles_part_, RT.STYLES
)
assert styles_part is styles_part_
# fixtures -------------------------------------------------------
@pytest.fixture(params=[0, 1, 2, 5, 9])
def add_heading_fixture(self, request, document, add_paragraph_, p_):
level = request.param
text = 'Spam vs. Bacon'
style = 'Title' if level == 0 else 'Heading%d' % level
return document, add_paragraph_, p_, text, level, style
@pytest.fixture
def add_empty_paragraph_fixture(self, document, document_part_, p_):
return document, document_part_, p_
@pytest.fixture
def add_page_break_fixture(self, document, document_part_, p_, r_):
return document, document_part_, p_, r_
@pytest.fixture
def add_paragraph_(self, request, p_):
return method_mock(
request, Document, 'add_paragraph', return_value=p_
)
@pytest.fixture(params=[
(None, None, 200, 100),
(1000, 500, 1000, 500),
(2000, None, 2000, 1000),
(None, 2000, 4000, 2000),
])
def add_picture_fixture(
self, request, Document_inline_shapes_, inline_shapes_):
width, height, expected_width, expected_height = request.param
document = Document()
image_path_ = instance_mock(request, str, name='image_path_')
picture_ = inline_shapes_.add_picture.return_value
picture_.width, picture_.height = 200, 100
return (
document, image_path_, width, height, inline_shapes_,
expected_width, expected_height, picture_
)
@pytest.fixture
def add_styled_paragraph_fixture(self, document, p_):
style = 'foobaresque'
return document, style, p_
@pytest.fixture(params=[None, 'LightShading-Accent1', 'foobar'])
def add_table_fixture(self, request, document, document_part_, table_):
rows, cols = 4, 2
style = expected_style = request.param
return (
document, rows, cols, style, document_part_, expected_style,
table_
)
@pytest.fixture
def add_text_paragraph_fixture(self, document, p_, r_):
text = 'foobar\rbarfoo'
return document, text, p_, r_
@pytest.fixture
def default_docx_(self, request):
return var_mock(request, 'docx.api._default_docx_path')
@pytest.fixture
def Document_inline_shapes_(self, request, inline_shapes_):
return property_mock(
request, Document, 'inline_shapes', return_value=inline_shapes_
)
@pytest.fixture
def document(self, open_):
return Document()
@pytest.fixture
def document_part_(self, request, p_, paragraphs_, table_, tables_):
document_part_ = instance_mock(
request, DocumentPart, content_type=CT.WML_DOCUMENT_MAIN
)
document_part_.add_paragraph.return_value = p_
document_part_.add_table.return_value = table_
document_part_.paragraphs = paragraphs_
document_part_.tables = tables_
return document_part_
@pytest.fixture
def docx_(self, request):
return instance_mock(request, str)
@pytest.fixture
def init_fixture(self, docx_, open_):
return docx_, open_
@pytest.fixture
def inline_shapes_(self, request):
return instance_mock(request, InlineShapes)
@pytest.fixture
def num_part_create_fixture(
self, document, NumberingPart_, document_part_, numbering_part_):
document_part_.part_related_by.side_effect = KeyError
return document, NumberingPart_, document_part_, numbering_part_
@pytest.fixture
def num_part_get_fixture(self, document, document_part_, numbering_part_):
document_part_.part_related_by.return_value = numbering_part_
return document, document_part_, numbering_part_
@pytest.fixture
def NumberingPart_(self, request, numbering_part_):
NumberingPart_ = class_mock(request, 'docx.api.NumberingPart')
NumberingPart_.new.return_value = numbering_part_
return NumberingPart_
@pytest.fixture
def numbering_part_(self, request):
return instance_mock(request, NumberingPart)
@pytest.fixture
def open_(self, request, document_part_, package_):
return method_mock(
request, Document, '_open',
return_value=(document_part_, package_)
)
@pytest.fixture
def open_fixture(self, docx_, Package_, package_, document_part_):
return docx_, Package_, package_, document_part_
@pytest.fixture
def p_(self, request, r_):
p_ = instance_mock(request, Paragraph)
p_.add_run.return_value = r_
return p_
@pytest.fixture
def Package_(self, request, package_):
Package_ = class_mock(request, 'docx.api.Package')
Package_.open.return_value = package_
return Package_
@pytest.fixture
def package_(self, request, document_part_):
package_ = instance_mock(request, Package)
package_.main_document = document_part_
return package_
@pytest.fixture
def paragraphs_(self, request):
return instance_mock(request, list)
@pytest.fixture
def paragraphs_fixture(self, document, paragraphs_):
return document, paragraphs_
@pytest.fixture
def r_(self, request):
return instance_mock(request, Run)
@pytest.fixture
def save_fixture(self, request, open_, package_):
file_ = instance_mock(request, str)
document = Document()
return document, package_, file_
@pytest.fixture
def StylesPart_(self, request, styles_part_):
StylesPart_ = class_mock(request, 'docx.api.StylesPart')
StylesPart_.new.return_value = styles_part_
return StylesPart_
@pytest.fixture
def styles_part_(self, request):
return instance_mock(request, StylesPart)
@pytest.fixture
def styles_part_create_fixture(
self, document, StylesPart_, document_part_, styles_part_):
document_part_.part_related_by.side_effect = KeyError
return document, StylesPart_, document_part_, styles_part_
@pytest.fixture
def styles_part_get_fixture(self, document, document_part_, styles_part_):
document_part_.part_related_by.return_value = styles_part_
return document, document_part_, styles_part_
@pytest.fixture
def table_(self, request):
return instance_mock(request, Table, style=None)
@pytest.fixture
def tables_(self, request):
return instance_mock(request, list)
@pytest.fixture
def tables_fixture(self, document, tables_):
return document, tables_
|
#!/usr/bin/python3
from cc.parser import Parser
from cc.optimizer import Optimizer
from cc.translator import Translator
from asm.assembler import Assembler
from link.linker import Linker, BinaryType
import sys
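# Usage sketch (script and file names are placeholders): compile the given .c files,
# assemble any .dasm files, link everything and print the resulting words as hex, e.g.
#   python thisdriver.py prog.c lib.dasm
# Passing -S stops after compilation and writes a <name>.c.dasm file next to each C input.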
if __name__ == '__main__':
c_files = []
asm_files = []
stop_at_comp = False
for file in sys.argv:
if file.endswith('.c'):
c_files.append(file)
elif file.endswith('.dasm'):
asm_files.append(file)
elif file == '-S':
stop_at_comp = True
objects = []
got_errors = False
for cf in c_files:
with open(cf, 'r') as f:
code = f.read()
p = Parser(code, filename=cf)
p.parse()
print(p.func_list[1])
if not p.got_errors:
opt = Optimizer(p)
opt.optimize()
trans = Translator(p)
trans.translate()
insts = trans.get_instructions()
code = '\n'.join(insts)
if stop_at_comp:
with open(cf + '.dasm', 'w') as f:
f.write(code)
else:
asm = Assembler(code, cf)
asm.parse()
asm.fix_labels()
if not asm.got_errors:
objects.append(asm.get_object())
else:
got_errors = True
else:
got_errors = True
for af in asm_files:
with open(af, 'r') as f:
code = f.read()
if not stop_at_comp:
asm = Assembler(code, filename=af)
asm.parse()
asm.fix_labels()
if not asm.got_errors:
objects.append(asm.get_object())
else:
got_errors = True
if stop_at_comp or got_errors:
        exit(1 if got_errors else 0)
# Link it all
linker = Linker()
for object in objects:
linker.append_object(object)
linker.link(BinaryType.RAW, {})
if not linker.got_errors:
for word in linker.get_words():
print(hex(word)[2:].zfill(4))
|
def map_signals(signals):
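    # Deduce which scrambled wire corresponds to which real segment from how often each
    # wire appears across the ten unique digit patterns: segment 'b' appears in 6 digits,
    # 'e' in 4 and 'f' in 9, so those counts identify them directly. 'a' and 'c' both
    # appear 8 times, 'd' and 'g' both 7 times, so they are told apart by the sorted
    # lengths of the patterns they appear in (the fingerprints hard-coded below).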
m = {}
for s in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:
appearances = list(filter(lambda x: s in x, signals))
if len(appearances) == 6:
m[s] = 'b'
continue
elif len(appearances) == 4:
m[s] = 'e'
continue
elif len(appearances) == 9:
m[s] = 'f'
continue
else:
appearances_length = list(map(lambda x: len(x), appearances))
appearances_length.sort()
if appearances_length == [3, 5, 5, 5, 6, 6, 6, 7]:
m[s] = 'a'
continue
elif appearances_length == [2, 3, 4, 5, 5, 6, 6, 7]:
m[s] = 'c'
continue
elif appearances_length == [4, 5, 5, 5, 6, 6, 7]:
m[s] = 'd'
continue
elif appearances_length == [5, 5, 5, 6, 6, 6, 7]:
m[s] = 'g'
continue
return m
def apply_mapping(mapping, display):
mapped = list(map(lambda c: mapping[c], display))
mapped.sort()
return "".join(mapped)
def decode_display(display):
if display == 'cf':
return 1
elif display == 'acdeg':
return 2
elif display == 'acdfg':
return 3
elif display == 'bcdf':
return 4
elif display == 'abdfg':
return 5
elif display == 'abdefg':
return 6
elif display == 'acf':
return 7
elif display == 'abcdefg':
return 8
elif display == 'abcdfg':
return 9
elif display == 'abcefg':
return 0
s = 0
with open('input') as f:
for line in f:
signals, output = tuple(map(lambda x: x.split(), line.split('|')))
mapping = map_signals(signals)
mapped_output = list(map(lambda x: apply_mapping(mapping, x), output))
decoded_output = list(map(lambda x: decode_display(x), mapped_output))
i = 0
for d in decoded_output[::-1]:
s += d * pow(10, i)
i += 1
print(s)
|
# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from ..items import ChannelItem, RoomItem
import json
class HuyaSpider(Spider):
name = 'huya'
allowed_domains = ['huya.com']
start_urls = [
'http://www.huya.com/g'
]
custom_settings = {
'SITE': {
'code': 'huya',
'name': '虎牙',
'description': '虎牙直播-中国领先的互动直播平台',
'url': 'http://www.huya.com',
'image': 'http://a.dwstatic.com/huya/main/img/logo.png',
'show_seq': 2
}
}
def parse(self, response):
room_query_list = []
for a_element in response.xpath('//li[@class="game-list-item"]/a'):
url = a_element.xpath('@href').extract_first()
short = url[url.rfind('/') + 1:]
report_attr = json.loads(a_element.xpath('@report').extract_first())
office_id = report_attr['game_id']
img_element = a_element.xpath('img')[0]
name = img_element.xpath('@title').extract_first()
image = img_element.xpath('@data-original').extract_first()
yield ChannelItem({
'office_id': office_id,
'short': short,
'name': name,
'image': image,
'url': url
})
url = 'http://www.huya.com/cache.php?m=LiveList&do=getLiveListByPage&tagAll=0&gameId={}'.format(office_id)
room_query_list.append({'url': url, 'channel': short, 'page': 1})
for room_query in room_query_list:
yield Request('{}&page=1'.format(room_query['url']), callback=self.parse_room_list, meta=room_query)
def parse_room_list(self, response):
room_list = json.loads(response.text)['data']['datas']
if isinstance(room_list, list):
for rjson in room_list:
yield RoomItem({
'office_id': rjson['privateHost'],
'name': rjson['introduction'],
'image': rjson['screenshot'],
'url': response.urljoin(rjson['privateHost']),
'online': int(rjson['totalCount']) if rjson['totalCount'].isdigit() else 0,
'host': rjson['nick'],
'channel': rjson['gameHostName']
})
if len(room_list) > 0:
next_meta = dict(response.meta, page=response.meta['page'] + 1)
yield Request('{}&page={}'.format(next_meta['url'], str(next_meta['page'])),
callback=self.parse_room_list, meta=next_meta)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import numpy as np
from arch.api.model_manager import manager as model_manager
from arch.api.proto import feature_selection_meta_pb2, feature_selection_param_pb2
from arch.api.proto.feature_binning_meta_pb2 import FeatureBinningMeta
from federatedml.statistic.data_overview import get_features_shape
from federatedml.util import abnormal_detection
from federatedml.util.transfer_variable import HeteroFeatureSelectionTransferVariable
class BaseHeteroFeatureSelection(object):
def __init__(self, params):
self.params = params
self.transfer_variable = HeteroFeatureSelectionTransferVariable()
self.cols = params.select_cols
self.filter_method = params.filter_method
self.header = []
def _save_meta(self, name, namespace):
unique_param_dict = copy.deepcopy(self.params.unique_param.__dict__)
unique_param = feature_selection_meta_pb2.UniqueValueParam(**unique_param_dict)
iv_dict = copy.deepcopy(self.params.iv_param.__dict__)
bin_dict = copy.deepcopy(self.params.iv_param.bin_param.__dict__)
del bin_dict['process_method']
del bin_dict['result_table']
del bin_dict['result_namespace']
del bin_dict['display_result']
if bin_dict['cols'] == -1:
bin_dict['cols'] = self.cols
bin_param = FeatureBinningMeta()
iv_dict["bin_param"] = bin_param
iv_param = feature_selection_meta_pb2.IVSelectionParam(**iv_dict)
coe_param_dict = copy.deepcopy(self.params.coe_param.__dict__)
coe_param = feature_selection_meta_pb2.CoeffOfVarSelectionParam(**coe_param_dict)
outlier_param_dict = copy.deepcopy(self.params.outlier_param.__dict__)
outlier_param = feature_selection_meta_pb2.OutlierColsSelectionParam(**outlier_param_dict)
meta_protobuf_obj = feature_selection_meta_pb2.FeatureSelectionMeta(filter_methods=self.filter_method,
local_only=self.params.local_only,
select_cols=self.header,
unique_param=unique_param,
iv_param=iv_param,
coe_param=coe_param,
outlier_param=outlier_param)
buffer_type = "HeteroFeatureSelectionGuest.meta"
model_manager.save_model(buffer_type=buffer_type,
proto_buffer=meta_protobuf_obj,
name=name,
namespace=namespace)
return buffer_type
def save_model(self, name, namespace):
meta_buffer_type = self._save_meta(name, namespace)
result_obj = feature_selection_param_pb2.FeatureSelectionParam(results=self.results)
param_buffer_type = "HeteroFeatureSelectionGuest.param"
model_manager.save_model(buffer_type=param_buffer_type,
proto_buffer=result_obj,
name=name,
namespace=namespace)
return [(meta_buffer_type, param_buffer_type)]
def load_model(self, name, namespace):
result_obj = feature_selection_param_pb2.FeatureSelectionParam()
model_manager.read_model(buffer_type="HeteroFeatureSelectionGuest.param",
proto_buffer=result_obj,
name=name,
namespace=namespace)
self.results = list(result_obj.results)
if len(self.results) == 0:
self.left_cols = -1
else:
result_obj = self.results[-1]
self.left_cols = list(result_obj.left_cols)
@staticmethod
def select_cols(instance, left_cols):
new_feature = []
for col in left_cols:
new_feature.append(instance.features[col])
new_feature = np.array(new_feature)
instance.features = new_feature
return instance
def _reset_header(self):
"""
        cols and left_cols hold column indices into the original header. Rebuild the
        header so that it only keeps the entries whose index survives in left_cols.
"""
new_header = []
for col in self.left_cols:
idx = self.cols.index(col)
new_header.append(self.header[idx])
self.header = new_header
def _transfer_data(self, data_instances):
if self.left_cols == -1:
features_shape = get_features_shape(data_instances)
if features_shape is None:
raise RuntimeError('Cannot get feature shape, please check input data')
self.left_cols = [i for i in range(features_shape)]
f = functools.partial(self.select_cols,
left_cols=self.left_cols)
new_data = data_instances.mapValues(f)
self._reset_header()
return new_data
def _abnormal_detection(self, data_instances):
"""
Make sure input data_instances is valid.
"""
abnormal_detection.empty_table_detection(data_instances)
abnormal_detection.empty_feature_detection(data_instances)
    def set_flowid(self, flowid="sample"):
self.flowid = flowid
self.transfer_variable.set_flowid(self.flowid)
|
import unittest
from conans.test.utils.tools import TestClient, TestServer
from conans.paths import CONANFILE, BUILD_INFO
from conans.util.files import load, save
import os
from conans.test.utils.test_files import temp_folder
from conans.model.info import ConanInfo
conanfile = """from conans import ConanFile
class ConanToolPackage(ConanFile):
name = "conantool"
version = "1.0"
exports = "*"
build_policy = "missing"
def package(self):
self.copy("*")
def package_info(self):
self.env_info.PYTHONPATH.append(self.package_folder)
"""
test = """def foo(output):
output.info("Hello Foo")
def bar(output):
output.info("Hello Bar")
def baz(output):
output.info("Hello Baz")
def boom(output):
output.info("Hello Boom")
"""
reuse = """from conans import ConanFile, tools
class ToolsTest(ConanFile):
name = "Consumer"
version = "0.1"
requires = "conantool/1.0@lasote/stable"
def source(self):
with tools.pythonpath(self):
import mytest
mytest.baz(self.output)
def build(self):
with tools.pythonpath(self):
import mytest
mytest.foo(self.output)
def package(self):
with tools.pythonpath(self):
import mytest
mytest.boom(self.output)
def package_info(self):
with tools.pythonpath(self):
import mytest
mytest.bar(self.output)
"""
class PythonBuildTest(unittest.TestCase):
def reuse_test(self):
client = TestClient()
client.save({CONANFILE: conanfile, "__init__.py": "", "mytest.py": test})
client.run("export lasote/stable")
client.save({CONANFILE: reuse}, clean_first=True)
client.run("install .")
self.assertNotIn("Hello Bar", client.user_io.out) # IMPORTANT!! WTF? Why this test was passing? Why I'm missing?
self.assertNotIn("Hello Foo", client.user_io.out)
client.run("build .")
self.assertNotIn("Hello Bar", client.user_io.out)
self.assertIn("Hello Foo", client.user_io.out)
client.run("export lasote/stable")
client.run("install Consumer/0.1@lasote/stable --build")
lines = [line.split(":")[1] for line in str(client.user_io.out).splitlines()
if line.startswith("Consumer/0.1@lasote/stable: Hello")]
self.assertEqual([' Hello Baz', ' Hello Foo', ' Hello Boom', ' Hello Bar'],
lines)
def upload_reuse_test(self):
server = TestServer()
servers = {"default": server}
client = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
client.save({CONANFILE: conanfile, "__init__.py": "", "mytest.py": test})
client.run("export lasote/stable")
client.save({CONANFILE: reuse}, clean_first=True)
client.run("export lasote/stable")
client.run("install Consumer/0.1@lasote/stable --build")
lines = [line.split(":")[1] for line in str(client.user_io.out).splitlines()
if line.startswith("Consumer/0.1@lasote/stable: Hello")]
self.assertEqual([' Hello Baz', ' Hello Foo', ' Hello Boom', ' Hello Bar'],
lines)
client.run("upload conantool/1.0@lasote/stable --all")
client.run("remove * -f")
client.run("search")
self.assertNotIn("lasote/stable", client.user_io.out)
client.run("export lasote/stable")
client.run("install Consumer/0.1@lasote/stable --build")
lines = [line.split(":")[1] for line in str(client.user_io.out).splitlines()
if line.startswith("Consumer/0.1@lasote/stable: Hello")]
self.assertEqual([' Hello Baz', ' Hello Foo', ' Hello Boom', ' Hello Bar'],
lines)
# Try again, just in case
client.run("upload conantool/1.0@lasote/stable --all")
client.run("remove * -f -r=default")
client.run("upload conantool/1.0@lasote/stable --all")
def basic_install_test(self):
client = TestClient()
client.save({CONANFILE: conanfile, "__init__.py": "", "mytest.py": test})
client.run("export lasote/stable")
client.save({CONANFILE: reuse}, clean_first=True)
client.run("export lasote/stable")
client.run("install Consumer/0.1@lasote/stable --build")
lines = [line.split(":")[1] for line in str(client.user_io.out).splitlines()
if line.startswith("Consumer/0.1@lasote/stable: Hello")]
self.assertEqual([' Hello Baz', ' Hello Foo', ' Hello Boom', ' Hello Bar'],
lines)
def basic_package_test(self):
client = TestClient()
client.save({CONANFILE: conanfile, "__init__.py": "", "mytest.py": test})
client.run("export lasote/stable")
client.save({CONANFILE: reuse}, clean_first=True)
client.run("export lasote/stable")
client.run("install Consumer/0.1@lasote/stable --build", ignore_error=True)
lines = [line.split(":")[1] for line in str(client.user_io.out).splitlines()
if line.startswith("Consumer/0.1@lasote/stable: Hello")]
self.assertEqual([' Hello Baz', ' Hello Foo', ' Hello Boom', ' Hello Bar'],
lines)
def basic_source_test(self):
client = TestClient()
client.save({CONANFILE: conanfile, "__init__.py": "", "mytest.py": test})
client.run("export lasote/stable")
client.save({CONANFILE: reuse}, clean_first=True)
client.run("install .")
client.run("source .")
self.assertIn("Hello Baz", client.user_io.out)
self.assertNotIn("Hello Foo", client.user_io.out)
self.assertNotIn("Hello Bar", client.user_io.out)
self.assertNotIn("Hello Boom", client.user_io.out)
def errors_test(self):
client = TestClient()
client.save({CONANFILE: conanfile, "__init__.py": "", "mytest.py": test})
client.run("export lasote/stable")
client.save({CONANFILE: reuse}, clean_first=True)
client.run("install")
# BUILD_INFO is created by default, remove it to check message
os.remove(os.path.join(client.current_folder, BUILD_INFO))
client.run("source .", ignore_error=True)
# Output in py3 is different, uses single quote
# Now it works automatically without the env generator file
self.assertIn("No module named mytest", str(client.user_io.out).replace("'", ""))
def pythonpath_env_injection_test(self):
# Save some custom python code in custom dir
external_py = '''
def external_baz():
print("External baz")
'''
external_dir = temp_folder()
save(os.path.join(external_dir, "external.py"), external_py)
conanfile = """
import os
from conans import ConanFile, tools
class ConanToolPackage(ConanFile):
name = "conantool"
version = "1.0"
exports = "*"
build_policy = "missing"
def build(self):
with tools.pythonpath(self):
import external
external.external_baz()
def package(self):
self.copy("*")
def package_info(self):
self.env_info.PYTHONPATH.append(self.package_folder)
"""
client = TestClient()
client.save({CONANFILE: conanfile, "__init__.py": "", "mytest.py": test})
client.run("export lasote/stable")
# We can't build the package without our PYTHONPATH
self.assertRaises(Exception, client.run, "install conantool/1.0@lasote/stable --build missing")
# But we can inject the PYTHONPATH
client.run("install conantool/1.0@lasote/stable -e PYTHONPATH=['%s']" % external_dir)
# Now we want to reuse the package and access both external stuff and mytest.py stuff
reuse = """from conans import ConanFile, tools
class ToolsTest(ConanFile):
name = "Consumer"
version = "0.1"
requires = "conantool/1.0@lasote/stable"
def build(self):
with tools.pythonpath(self):
import mytest
mytest.foo(self.output)
import external
external.external_baz()
"""
client.save({CONANFILE: reuse})
client.run("install --build -e PYTHONPATH=['%s']" % external_dir)
client.run("build .")
info = ConanInfo.loads(load(os.path.join(client.current_folder, "conaninfo.txt")))
pythonpath = info.env_values.env_dicts(None)[1]["PYTHONPATH"]
        self.assertEqual(os.path.normpath(pythonpath[0]), os.path.normpath(external_dir))
        self.assertEqual(len(pythonpath), 2)
def external_python_with_simple_var_test(self):
client = TestClient()
conanfile_simple = """from conans import ConanFile, tools
class ToolsTest(ConanFile):
name = "Hello"
version = "0.1"
def build(self):
with tools.pythonpath(self):
import external
external.external_baz()
"""
external_py = '''
def external_baz():
print("External baz")
'''
external_dir = temp_folder()
save(os.path.join(external_dir, "external.py"), external_py)
client.save({CONANFILE: conanfile_simple})
client.run("export lasote/stable")
# Should work even if PYTHONPATH is not declared as [], only external resource needed
client.run('install Hello/0.1@lasote/stable --build missing -e PYTHONPATH="%s"' % external_dir)
|
import os
from uuid import uuid4
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from urllib.parse import urlparse, quote_plus, unquote
from flask import Flask, request, jsonify, render_template, redirect
# Init app
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
# Database
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+pymysql://VynHK8GlPX:ghq51q8BRG@remotemysql.com:3306/VynHK8GlPX"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Init db
db = SQLAlchemy(app)
# Init ma
ma = Marshmallow(app)
# URL Class/Model
class Url(db.Model):
code = db.Column(db.String(100), primary_key=True)
url = db.Column(db.String(200), unique=True)
date = db.Column(db.Date())
def __init__(self, code, url, date):
self.code = code
self.url = url
self.date = date
# URL Schema
class UrlSchema(ma.Schema):
class Meta:
fields = ('code', 'url')
# Init schema
url_schema = UrlSchema()
urls_schema = UrlSchema(many=True)
# Create a URL
@app.route('/', methods=['GET', 'POST', 'DELETE'])
def add_url():
# Load Home page
if request.method == "GET":
return render_template('index.html', payload = None)
# Delete URL
if request.method == "DELETE":
# Get URL code
code = request.form['url']
url = Url.query.get(code)
# Delete record
db.session.delete(url)
db.session.commit()
return url_schema.jsonify(url)
try:
# Delete URL
if( request.form['delete']):
# Get URL code
code = request.form['url']
url = Url.query.get(code)
# Delete record
db.session.delete(url)
db.session.commit()
# Return new URL list
result = urls_schema.dump(Url.query.all())
return render_template("table.html", payload = result)
    except KeyError:
        # 'delete' was not submitted with the form, so treat this POST as a create request
url = request.form['url']
# Assert valid link is supplied
if(not bool(urlparse(url).scheme)):
return render_template('index.html', payload = "Error")
# Return Link if exists
for link in Url.query.all():
if(link.url == url):
return render_template('index.html', payload = link)
        # Generate a unique identifier
        while True:
            code = str(uuid4())[:6]
            # If the code already exists, generate a new one; otherwise stop
            if Url.query.get(code) is None:
                break
# Save new URL object
new_url = Url(code, url, datetime.now())
db.session.add(new_url)
db.session.commit()
return render_template('index.html', payload = new_url)
# Get Single URL
@app.route('/<code>', methods=['GET'])
def get_url(code):
# Fetch all URLs
if(code == "urls"):
all_urls = Url.query.all()
result = urls_schema.dump(all_urls)
return render_template("table.html", payload = result)
# Redirect to link
url = Url.query.get(code)
if(url is None):
return render_template('404.html')
url = url_schema.dump(url)
return redirect(url["url"])
# Run Server
if __name__ == '__main__':
# Run migrations
try:
db.create_all()
print("Migrated tables successfully")
except:
print("Error Migrating tables. They might already exist")
app.run(debug=False)
|
import csv
def importListFromCSV(filename, dest):
with open(filename) as csvDataFile:
csvReader = csv.reader(csvDataFile)
for row in csvReader:
dest.append(row[0])
def importTwoListsFromCSV(filename, dest1, dest2):
with open(filename) as csvDataFile:
csvReader = csv.reader(csvDataFile)
for row in csvReader:
dest1.append(row[0])
dest2.append(row[1])
def writeToFile(filename, cards):
    with open(filename, 'w') as f:
        for card in cards:
            f.write("\"%s\"\n" % (card))
def appendRowToFile(filename, myList):
    with open(filename, 'a') as f:
        for i in range(len(myList)):
            if i < (len(myList) - 1):
                f.write("\"%s\"," % (myList[i]))
            else:
                f.write("\"%s\"\n" % (myList[i]))  # last element in row
def isPlayer(userInput):
    return userInput in playerNames
def isSuspect(userInput):
    return userInput in suspects
def isWeapon(userInput):
    return userInput in weapons
def isRoom(userInput):
    return userInput in rooms
suspects = []
weapons = []
rooms = []
playerNames = []
playerTypes = []
numPlayerCards = []
importListFromCSV("setup/suspects.csv", suspects)
importListFromCSV("setup/weapons.csv", weapons)
importListFromCSV("setup/rooms.csv", rooms)
importTwoListsFromCSV("setup/playerNames.csv", playerNames, playerTypes)
numPlayers = len(playerNames)
numSuspects = len(suspects)
numWeapons = len(weapons)
numRooms = len(rooms)
allCards = suspects + weapons + rooms
numCards = len(allCards)
for i in range(numPlayers):
initialNumCards = 0
numPlayerCards.append(initialNumCards)
# Determine how many of the 18 dealt cards each player receives (dealt round-robin)
cardIndex = 0
while cardIndex < 18:
playerIndex = 0
while playerIndex < numPlayers:
if cardIndex >= 18:
break
numPlayerCards[playerIndex] = numPlayerCards[playerIndex] + 1
cardIndex = cardIndex + 1
playerIndex = playerIndex + 1
|
import requests
from bs4 import BeautifulSoup as soup
import pandas as pd
url = 'https://www.newegg.com/p/pl?Submit=StoreIM&Depa=1&Category=38'
uClient = requests.get(url)
page = uClient.content
uClient.close()
page_html = soup(page, 'html.parser')
container = page_html.find(class_='items-view is-grid')
Title = [item.img['title'] for item in container.select('.item-container .item-brand')]
Product_Name = [item.text for item in container.select('.item-container .item-title')]
Shipping = [item.text.strip() for item in container.select('.item-container .price-ship')]
Data = pd.DataFrame({
'TITLE': Title,
'Product_Name': Product_Name,
'Shipping': Shipping,
})
Data = Data.sort_values('TITLE')
Data.to_csv('web_scrap.csv', index = False)
|
import os
import sys
from operator import itemgetter
from methods import dates
from db.elastic import Elastic
from db.postgresql import PostgreSQL
GEONAMES_USERNAME = ""
if not GEONAMES_USERNAME:
print("Please fill out GEONAMES_USERNAME in geotag/config.py")
# sys.exit()
# Folder to save GeoNames data
GEONAMES_DIR = os.path.join('input', 'GeoNames')
# Refresh the GeoNames data on new preprocessing run
REFRESH_GEONAMES_TABLES = False
# Max length of n-grams to use for toponym recognition
MAX_NGRAM_LENGTH = 3
# Minimum length of one n-gram
MINIMUM_GRAM_LENGTH = 4
# Two locations are considered 'near' if the distance between them is below:
NEAR_DISTANCE = 200000  # m
# When multiple entities are mentioned in the same tweet, discard them if further apart than:
MAX_DISTANCE_ENTITIES_IN_SAME_TWEET = 200000  # m
# A tweet bbox and entity are considered a match if closer than:
MAX_DISTANCE_BBOX_CENTER = 200000  # m
# A tweet coordinate and entity are considered a match if closer than:
MAX_DISTANCE_CITY_COORDINATE = 200000  # m
# Scores given for metadata matches (relative importance)
SCORE_TYPES = {
'coordinates match': 2,
'user home': 1,
'bbox': 2,
# 'time zone': .5,
'family': 3,
'utc_offset': .5
}
# Name of the PostgreSQL database (lowercase)
POSTGRESQL_DB = 'taggs'
# Name of the toponym resolution table
TOPONYM_RESOLUTION_TABLE = 'toponym_resolution_table'
# Refresh time of the realtime geotagging module
REAL_TIME_TAGGER_REFRESH_TIME = 300 # sec
# Name of the Elasticsearch index with tweets
TWEETS_INDEX = 'taggs'
# Name of the Elasticsearch index with toponyms
TOPONYM_INDEX = 'toponyms'
# Update tweets in the database with their locations (flag for testing purposes)
UPDATE = False
# Connect to databases
es_tweets = Elastic()
es_toponyms = es_tweets
pg_Geotag = PostgreSQL(POSTGRESQL_DB)
pg = PostgreSQL(POSTGRESQL_DB)
# The functions below are meant to connect to your database.
class TweetAnalyzerCustom:
# ID = ID of the tweet as str
# tweet = {
# 'date': '%a %b %d %H:%M:%S +0000 %Y',
# 'user': {
# 'id': user ID,
# 'location': user location,
# 'time zone': user time zone,
# },
# 'text': text in utf-8 - retweeted_status if retweet, otherwise text
# 'retweet': Boolean: True or False,
# 'lang': tweet language - must be available,
    # 'coordinates': tweet coordinates, if available and not (0, 0).
# 'bbox': tweet bbox as tuple if bbox is available: (West, South, East, North)
# }
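    # A hedged, illustrative example of a tweet dict matching the layout above
    # (all values are made up for illustration only):
    # {
    #     'date': 'Mon Jan 01 12:00:00 +0000 2018',
    #     'user': {'id': 12345, 'location': 'Amsterdam', 'time zone': 'Amsterdam'},
    #     'text': 'Heavy flooding reported near the river',
    #     'retweet': False,
    #     'lang': 'en',
    #     'coordinates': (4.9, 52.37),
    #     'bbox': (4.7, 52.3, 5.0, 52.4),
    # }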
def parse_tweet(self, tweet):
ID = tweet['_id']
tweet = tweet['_source']
tweet['date'] = dates.isoformat_2_date(tweet['date'])
return ID, tweet
class GeotagCustom:
"""Custom class for Geotag algorithm"""
def locations_to_commit(self, fully_resolved, update=UPDATE, index=TWEETS_INDEX):
"""Run through each tweet (ID) and its resolved locations and commit that to the database.
The function first checks with the cache if an update is neccesary"""
for ID, locations in fully_resolved.items():
locations = sorted(locations, key=itemgetter('toponym'))
# Check if locations key already exists in the tweets dictionary.
# If so, these are the locations in the database. And the code
            # in the else-block is run to see if one or more of the locations
# should be updated.
# If the locations key does not exist, the db_locations are None,
# and the new_locations are the currently assigned locations.
try:
db_locations = self.tweets[ID]['locations']
except KeyError:
db_locations = None
new_locations = locations
else:
new_locations = []
for db_loc in db_locations:
try:
new_locations.append(next(
loc for loc in locations
if loc['toponym'] == db_loc['toponym']
and loc['avg_score'] > db_loc['avg_score']
))
except StopIteration:
new_locations.append(db_loc)
for loc in locations:
try:
next(
db_loc for db_loc in db_locations
if db_loc['toponym'] == loc['toponym']
)
except StopIteration:
new_locations.append(loc)
finally:
if db_locations != new_locations:
self.tweets[ID]['locations'] = new_locations
if update:
body = {
'doc': {'locations': new_locations},
'_index': index,
'_type': 'tweet',
'_id': ID,
'_op_type': 'update'
}
yield body
def commit(self, tweets):
"""Commit tweets to the database"""
es_tweets.bulk_operation(tweets)
def analyze_tweets(self, query):
"""Function that analyzes all tweets using analyze_tweet, it is possible to change the number
of cores used for this function"""
tweets = es_tweets.scroll_through(index=TWEETS_INDEX, body=query, size=1000, source=True)
loc_tweets = dict(
[
(item[0], item[1]) for item in [
self.tweet_analyzer.analyze_tweet(tweet)
for tweet in tweets
] if item is not None
]
)
return loc_tweets
|
# ==================================================================================
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================
#
# concatenateVideos.py
# by: Rob Dachowski
# For questions or feedback, please contact robdac@amazon.com
#
# Purpose: This code uses the output of makevideo.bat to combine the clips into a short demo consisting of
# short subclips and some title frames
# Change Log:
# 6/29/2018: Initial version
#
# ==================================================================================
# Import everything needed to edit video clips
from moviepy.editor import *
from moviepy import editor
from moviepy.video.tools.subtitles import SubtitlesClip
#import moviepy.video.fx.all as vfx
from time import gmtime, strftime
# Load the clips output by makevideo.bat
print strftime("%H:%M:%S", gmtime()), "Reading video English clip..."
english = VideoFileClip("subtitledVideo-en.mp4")
english = english.subclip( 0, 15).set_duration(15)
print strftime("%H:%M:%S", gmtime()), "Reading video Spanish clip..."
spanish = VideoFileClip("subtitledVideo-es.mp4")
spanish = spanish.subclip( 15, 30).set_duration(15)
print strftime("%H:%M:%S", gmtime()), "Reading video German clip..."
german = VideoFileClip("subtitledVideo-de.mp4")
german = german.subclip( 30, 45).set_duration(15)
print strftime("%H:%M:%S", gmtime()), "Creating title..."
# Generate a text clip. You can customize the font, color, etc.
toptitle = TextClip("Creating Subtitles and Translations Using Amazon Services:\n\nAmazon Transcribe\nAmazon Translate\nAmazon Polly",fontsize=36,color='white', bg_color='black', method="caption", align="center", size=english.size)
toptitle = toptitle.set_duration(5)
subtitle1 = TextClip("re:Invent 2017 Keynote Address",fontsize=36,color='white', bg_color='black', method="caption", align="center", size=english.size)
subtitle1 = subtitle1.set_duration(5)
subtitle2 = TextClip( "\nAndy Jassy, President and CEO of Amazon Web Services", fontsize=28, color='white', bg_color='black', method="caption", align="center ", size=english.size)
subtitle2 = subtitle2.set_duration(5)
# Composite the video clips into a title page
title = CompositeVideoClip( [ toptitle, subtitle1.set_start(5), subtitle2.set_start(9)] ).set_duration(15)
#Create text clips for the various different translations
est = TextClip("English Subtitles\nUsing Amazon Transcribe",fontsize=24,color='white', bg_color='black', method="caption", align="center", size=english.size)
est = est.set_pos('center').set_duration(2.5)
sst = TextClip("Spanish Subtitles\nUsing Amazon Transcribe, Amazon Translate, and Amazon Polly",fontsize=24,color='white', bg_color='black', method="caption", align="center", size=english.size)
sst = sst.set_pos('center').set_duration(2.5)
dst = TextClip("German Subtitles\nUsing Amazon Transcribe, Amazon Translate, and Amazon Polly",fontsize=24,color='white', bg_color='black', method="caption", align="center", size=english.size)
dst = dst.set_pos('center').set_duration(2.5)
print strftime("%H:%M:%S", gmtime()), "Concatenating videos"
# concatenate the various titles, subtitles, and clips together
combined = concatenate_videoclips( [title.crossfadeout(2), est, english, sst, spanish, dst, german] )
# Write the result to a file (many options available !)
print strftime("%H:%M:%S", gmtime()), "Writing concatnated video"
combined.write_videofile("combined.mp4", codec="libx264", audio_codec="aac", fps=24)
|
# coding=utf-8
__author__ = 'defence.zhang@gmail.com'
__date__ = "2018/11/20 下午9:38:00"
import sys
reload(sys)
sys.setdefaultencoding("utf8")
import os
from datetime import datetime
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado.options import define, options, parse_command_line
from tornado.web import Application, RequestHandler
from motor.motor_tornado import MotorClient
define("port", default=8999, type=int, help="server listen port")
define("debug", default=True, type=bool, help="server run mode")
parse_command_line()
try:
from py import config
except ImportError:
print "please create py/config/%s.py and modify it correctly as py/config/simple.py ..." % ('local' if options.debug else 'online')
exit()
from py import wx
from py.admin import activity, login
class TestHandler(RequestHandler):
def get(self):
page = self.get_query_argument('page')
value = int(self.get_query_argument('value'))
self.render('wx/%s.html' % page, package={'value': value})
def main():
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), 'templates'),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
cookie_secret=datetime.today().strftime("CJ%Y"),
debug=options.debug,
autoescape=None
)
handlers = [
# (r'/wx/cj.html', wx.QrCodeHandler),
(r'/qrcode/(\w+)/(\w+)', wx.QrCodeHandler),
(r'/wx/post.json', wx.PostAddressHandler),
        # Admin API endpoints
        (r'/activity', activity.ActivityHandler),
        (r'/activity/(\w+)/qrcode', activity.QrCodePageHandler),  # QR code image list page
        (r'/activity/(\w+)/qrcode/(\w+)', activity.QRCodeHander),  # GET a single QR code image: id, seq, salt
(r'/login', login.LoginHandler),
]
if options.debug:
handlers += [
(r'/test.html', TestHandler),
]
application = Application(handlers, **settings)
# Forks one process per CPU.
application.listen(options.port, xheaders=True) # .start(0)
# Now, in each child process, create a MotorClient.
application.settings['db'] = MotorClient(config.mongodb).cj
app_log.warning("waqu live start at port: %s" % options.port)
IOLoop.instance().start()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Copyright 2019–2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sygnal.notifications import ConcurrencyLimitedPushkin
from sygnal.utils import twisted_sleep
from tests.testutils import TestCase
DEVICE_GCM1_EXAMPLE = {
"app_id": "com.example.gcm",
"pushkey": "spqrg",
"pushkey_ts": 42,
}
DEVICE_GCM2_EXAMPLE = {
"app_id": "com.example.gcm",
"pushkey": "spqrh",
"pushkey_ts": 42,
}
DEVICE_APNS_EXAMPLE = {
"app_id": "com.example.apns",
"pushkey": "spqra",
"pushkey_ts": 42,
}
class SlowConcurrencyLimitedDummyPushkin(ConcurrencyLimitedPushkin):
async def _dispatch_notification_unlimited(self, n, device, context):
"""
We will deliver the notification to the mighty nobody
and we will take one second to do it, because we are slow!
"""
await twisted_sleep(1.0, self.sygnal.reactor)
return []
class ConcurrencyLimitTestCase(TestCase):
def config_setup(self, config):
super(ConcurrencyLimitTestCase, self).config_setup(config)
config["apps"]["com.example.gcm"] = {
"type": "tests.test_concurrency_limit.SlowConcurrencyLimitedDummyPushkin",
"inflight_request_limit": 1,
}
config["apps"]["com.example.apns"] = {
"type": "tests.test_concurrency_limit.SlowConcurrencyLimitedDummyPushkin",
"inflight_request_limit": 1,
}
def test_passes_under_limit_one(self):
"""
Tests that a push notification succeeds if it is under the limit.
"""
resp = self._request(self._make_dummy_notification([DEVICE_GCM1_EXAMPLE]))
self.assertEqual(resp, {"rejected": []})
def test_passes_under_limit_multiple_no_interfere(self):
"""
Tests that 2 push notifications succeed if they are to different
pushkins (so do not hit a per-pushkin limit).
"""
resp = self._request(
self._make_dummy_notification([DEVICE_GCM1_EXAMPLE, DEVICE_APNS_EXAMPLE])
)
self.assertEqual(resp, {"rejected": []})
def test_fails_when_limit_hit(self):
"""
Tests that 1 of 2 push notifications fail if they are to the same pushkins
(so do hit the per-pushkin limit of 1).
"""
resp = self._multi_requests(
[
self._make_dummy_notification([DEVICE_GCM1_EXAMPLE]),
self._make_dummy_notification([DEVICE_GCM2_EXAMPLE]),
]
)
# request 0 will succeed
self.assertEqual(resp[0], {"rejected": []})
# request 1 will fail because request 0 has filled the limit
self.assertEqual(resp[1], 502)
|
# -*- coding: utf-8 -*-
import json
import os
from mootdx.utils import get_stock_market
from pytdx.exhq import TdxExHq_API
from pytdx.hq import TdxHq_API
# Stock market quotes
class Quotes(object):
    @staticmethod
    def factory(market='std', **kwargs):
        if market == 'ext':
            return ExtQuotes(**kwargs)
        elif market == 'std':
            return StdQuotes(**kwargs)
    # Financial data download (affairs)
    @staticmethod
    def financial(downdir='.'):
        from tqdm import tqdm
        from pytdx.crawler.base_crawler import demo_reporthook
        from pytdx.crawler.history_financial_crawler import HistoryFinancialCrawler
        from pytdx.crawler.history_financial_crawler import HistoryFinancialListCrawler
        crawler = HistoryFinancialListCrawler()
        datacrawler = HistoryFinancialCrawler()
        list_data = crawler.fetch_and_parse()
        for x in tqdm(list_data):
            downfile = os.path.join(downdir, x['filename'])
            result = datacrawler.fetch_and_parse(reporthook=demo_reporthook, filename=x['filename'], path_to_download=downfile)
class StdQuotes(object):
    """Real-time quotes for the standard stock market"""
    # __slots__ =
    def __init__(self, **kwargs):
        self.config = None
        try:
            # Read the optional user config file (~/.mootdx/config.json)
            with open(os.path.join(os.environ['HOME'], '.mootdx/config.json')) as f:
                self.config = json.load(f)
        except Exception:
            self.config = None
        self.client = TdxHq_API(**kwargs)
        if not self.config:
            self.bestip = os.environ.setdefault("MOOTDX_SERVER", '202.108.253.131:7709')
            self.bestip = self.bestip.split(':')
            self.bestip[1] = int(self.bestip[1])
        else:
            self.bestip = self.config.get('SERVER')
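        # Assumed layout of ~/.mootdx/config.json (an illustration, not confirmed by the
        # source): 'SERVER' is unpacked via self.client.connect(*self.bestip), so it
        # should hold a (host, port) pair, e.g. {"SERVER": ["202.108.253.131", 7709]}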
    # K-line (candlestick) data
    def bars(self, symbol='000001', category='9', start='0', offset='100'):
        '''
        Fetch real-time daily K-line data.
        :param symbol: stock code
        :param category: data category (K-line type)
        :param market: securities market
        :param start: start position
        :param offset: number of records per request
        :return: pd.DataFrame or None
        '''
        market = get_stock_market(symbol)
        with self.client.connect(*self.bestip):
            data = self.client.get_security_bars(
                int(category), int(market), str(symbol), int(start), int(offset))
            return self.client.to_df(data)
    # Intraday minute data
    def minute(self, symbol=''):
        '''
        Fetch real-time intraday minute data.
        :param market: securities market
        :param symbol: stock code
        :return: pd.DataFrame
        '''
        market = get_stock_market(symbol)
        with self.client.connect(*self.bestip):
            data = self.client.get_minute_time_data(int(market), symbol)
            return self.client.to_df(data)
    # Historical intraday minute data
    def minute_his(self, symbol='', datetime='20161209'):
        '''
        Fetch historical intraday minute data.
        :param market: securities market
        :param symbol: stock code
        :param datetime: date, e.g. '20161209'
        :return: pd.DataFrame or None
        '''
        market = get_stock_market(symbol)
        with self.client.connect(*self.bestip):
            data = self.client.get_history_minute_time_data(
                int(market), symbol, datetime)
            return self.client.to_df(data)
    def trans(self, symbol='', start=0, offset=10):
        '''
        Query tick-by-tick transaction data.
        :param market: market code
        :param symbol: stock code
        :param start: start position
        :param offset: number of records requested
        :return: pd.DataFrame or None
        '''
        market = get_stock_market(symbol)
        with self.client.connect(*self.bestip):
            data = self.client.get_transaction_data(
                int(market), symbol, int(start), int(offset))
            return self.client.to_df(data)
    def trans_his(self, symbol='', start=0, offset=10, date=''):
        '''
        Query historical tick-by-tick transaction data.
        :param market: market code
        :param symbol: stock code
        :param start: start position
        :param offset: number of records
        :param date: date
        :return: pd.DataFrame or None
        '''
        market = get_stock_market(symbol)
        with self.client.connect(*self.bestip):
            data = self.client.get_history_transaction_data(
                int(market), symbol, int(start), int(offset), date)
            return self.client.to_df(data)
    def company(self, symbol='', detail='category', *args, **kwargs):
        '''
        Fetch company information.
        :param symbol: stock code
        :param detail:
        :param args:
        :param kwargs:
        :return:
        '''
        pass
    def company_category(self, symbol=''):
        '''
        Query the catalogue of company information files.
        :param market: market code
        :param symbol: stock code
        :return: pd.DataFrame or None
        '''
        market = get_stock_market(symbol)
        with self.client.connect(*self.bestip):
            data = self.client.get_company_info_category(int(market), symbol)
            return self.client.to_df(data)
    def company_content(self, symbol='', file='', start=0, offset=10):
        '''
        Read the content of a company information file.
        :param market: market code
        :param symbol: stock code
        :param file: file name
        :param start: start position
        :param offset: number of records
        :return: pd.DataFrame or None
        '''
        market = get_stock_market(symbol)
        with self.client.connect(*self.bestip):
            data = self.client.get_company_info_content(
                int(market), symbol, file, int(start), int(offset))
            return self.client.to_df(data)
    def xdxr(self, symbol=''):
        '''
        Read ex-dividend / ex-rights (XDXR) information.
        :param market: market code
        :param symbol: stock code
        :return: pd.DataFrame or None
        '''
        market = get_stock_market(symbol)
        with self.client.connect(*self.bestip):
            data = self.client.get_xdxr_info(int(market), symbol)
            return self.client.to_df(data)
    def k(self, symbol='', begin=None, end=None):
        '''
        Read K-line data.
        :param symbol: stock code
        :param begin: start date
        :param end: end date
        :return: pd.DataFrame or None
        '''
        with self.client.connect(*self.bestip):
            data = self.client.get_k_data(symbol, begin, end)
            return data
    def index(
        self,
        symbol='000001',
        market='sh',
        category='9',
        start='0',
        offset='100'):
        '''
        Fetch index K-line data.
        K-line types (category):
        - 0: 5-minute K-line
        - 1: 15-minute K-line
        - 2: 30-minute K-line
        - 3: 1-hour K-line
        - 4: daily K-line
        - 5: weekly K-line
        - 6: monthly K-line
        - 7: 1-minute
        - 8: 1-minute K-line
        - 9: daily K-line
        - 10: quarterly K-line
        - 11: yearly K-line
        :param symbol: stock/index code
        :param category: data category (K-line type)
        :param market: securities market ('sh' or 'sz')
        :param start: start position
        :param offset: number of records per request
        :return: pd.DataFrame or None
        '''
        market = 1 if market == 'sh' else 0
        with self.client.connect(*self.bestip):
            data = self.client.get_index_bars(
                int(category), int(market), str(symbol), int(start), int(offset))
            return self.client.to_df(data)
    def block(self, tofile="block.dat"):
        '''
        Fetch securities block (sector) information.
        :param tofile: block data file name
        :return: pd.DataFrame or None
        '''
        with self.client.connect(*self.bestip):
            data = self.client.get_and_parse_block_info(tofile)
            return self.client.to_df(data)
    def batch(self, method='', offset=100, *args, **kwargs):
        '''
        Batch-download related data.
        :param method:
        :param offset:
        :return:
        '''
        pass
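# Minimal usage sketch (illustrative only; the symbol and the availability of a live
# TDX server are assumptions, not guarantees made by this module):
#
#   client = Quotes.factory(market='std')
#   daily = client.bars(symbol='600036', category=9, start=0, offset=10)  # daily K-line
#   ticks = client.trans(symbol='600036', start=0, offset=10)             # tick-by-tick trades
#   print(daily.head())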
class ExtQuotes(object):
    """Real-time quotes for the extended market"""
    def __init__(self, **kwargs):
        self.client = TdxExHq_API(**kwargs)
        # self.bestip = os.environ.setdefault("MOOTDX_SERVER", '61.152.107.141:7727')
        # self.bestip = kwargs.get("bestip", '202.108.253.131:7709')
        # self.bestip = self.bestip.split(':')
        self.bestip = ('202.108.253.131', 7709)
        # self.bestip[1] = int(self.bestip[1])
    def markets(self):
        '''
        Fetch the list of available markets.
        :return: pd.DataFrame or None
        '''
        with self.client.connect(*self.bestip):
            data = self.client.get_markets()
            return self.client.to_df(data)
    def quote5(self, market=47, symbol="IF1709"):
        '''
        Query level-5 (five-depth) quotes.
        :return: pd.DataFrame or None
        '''
        with self.client.connect(*self.bestip):
            data = self.client.get_instrument_quote(market, symbol)
            return self.client.to_df(data)
    def minute(self, market=47, symbol="IF1709"):
        '''
        Query intraday minute data.
        :return: pd.DataFrame or None
        '''
        with self.client.connect(*self.bestip):
            data = self.client.get_minute_time_data(market, symbol)
            return self.client.to_df(data)
    def instrument(self, start=0, offset=100):
        '''
        Query the instrument code list.
        :param start: start position
        :param offset: number of records per request
        :return: pd.DataFrame or None
        '''
        with self.client.connect(*self.bestip):
            # nums = self.client.get_instrument_count()
            data = self.client.get_instrument_info(int(start), int(offset))
            return self.client.to_df(data)
|
'''deployments.py - azurerm functions for Deployments'''
from .restfns import do_get
from .settings import get_rm_endpoint, BASE_API
def list_deployment_operations(access_token, subscription_id, rg_name, deployment_name):
    '''List all operations involved in a given deployment.
    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rg_name (str): Azure resource group name.
        deployment_name (str): Name of the deployment.
    Returns:
        HTTP response. JSON body.
    '''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rg_name,
'/providers/Microsoft.Resources/deployments/', deployment_name,
'/operations',
'?api-version=', BASE_API])
return do_get(endpoint, access_token)
def show_deployment(access_token, subscription_id, rg_name, deployment_name):
    '''Show details for a named deployment.
    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rg_name (str): Azure resource group name.
        deployment_name (str): Name of the deployment.
    Returns:
        HTTP response. JSON body.
    '''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rg_name,
'/providers/microsoft.resources/deployments/', deployment_name,
'?api-version=', BASE_API])
return do_get(endpoint, access_token)
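# Illustrative calls (hypothetical subscription/resource names; `access_token` is assumed
# to be a valid Azure bearer token obtained elsewhere):
#
#   ops = list_deployment_operations(access_token, 'my-subscription-id',
#                                    'my-resource-group', 'my-deployment')
#   details = show_deployment(access_token, 'my-subscription-id',
#                             'my-resource-group', 'my-deployment')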
|
# Author: Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from .mixin import TransformerMixin
from .base import BaseEstimator
from ..time_frequency.tfr import _compute_tfr, _check_tfr_param
from ..utils import fill_doc, _check_option
@fill_doc
class TimeFrequency(TransformerMixin, BaseEstimator):
"""Time frequency transformer.
Time-frequency transform of times series along the last axis.
Parameters
----------
freqs : array-like of float, shape (n_freqs,)
The frequencies.
sfreq : float | int, default 1.0
Sampling frequency of the data.
method : 'multitaper' | 'morlet', default 'morlet'
The time-frequency method. 'morlet' convolves a Morlet wavelet.
'multitaper' uses Morlet wavelets windowed with multiple DPSS
multitapers.
n_cycles : float | array of float, default 7.0
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
time_bandwidth : float, default None
If None and method=multitaper, will be set to 4.0 (3 tapers).
Time x (Full) Bandwidth product. Only applies if
method == 'multitaper'. The number of good tapers (low-bias) is
chosen automatically based on this to equal floor(time_bandwidth - 1).
use_fft : bool, default True
Use the FFT for convolutions or not.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, default 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
%(n_jobs)s
The number of epochs to process at the same time. The parallelization
is implemented across channels.
%(verbose)s
See Also
--------
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_multitaper
"""
def __init__(self, freqs, sfreq=1.0, method='morlet', n_cycles=7.0,
time_bandwidth=None, use_fft=True, decim=1, output='complex',
n_jobs=1, verbose=None): # noqa: D102
"""Init TimeFrequency transformer."""
freqs, sfreq, _, n_cycles, time_bandwidth, decim = \
_check_tfr_param(freqs, sfreq, method, True, n_cycles,
time_bandwidth, use_fft, decim, output)
self.freqs = freqs
self.sfreq = sfreq
self.method = method
self.n_cycles = n_cycles
self.time_bandwidth = time_bandwidth
self.use_fft = use_fft
self.decim = decim
# Check that output is not an average metric (e.g. ITC)
_check_option('output', output, ['complex', 'power', 'phase'])
self.output = output
self.n_jobs = n_jobs
self.verbose = verbose
def fit_transform(self, X, y=None):
"""Time-frequency transform of times series along the last axis.
Parameters
----------
X : array, shape (n_samples, n_channels, n_times)
The training data samples. The channel dimension can be zero- or
1-dimensional.
y : None
For scikit-learn compatibility purposes.
Returns
-------
Xt : array, shape (n_samples, n_channels, n_freqs, n_times)
The time-frequency transform of the data, where n_channels can be
zero- or 1-dimensional.
"""
return self.fit(X, y).transform(X)
def fit(self, X, y=None): # noqa: D401
"""Do nothing (for scikit-learn compatibility purposes).
Parameters
----------
X : array, shape (n_samples, n_channels, n_times)
The training data.
y : array | None
The target values.
Returns
-------
self : object
Return self.
"""
return self
def transform(self, X):
"""Time-frequency transform of times series along the last axis.
Parameters
----------
X : array, shape (n_samples, n_channels, n_times)
The training data samples. The channel dimension can be zero- or
1-dimensional.
Returns
-------
Xt : array, shape (n_samples, n_channels, n_freqs, n_times)
The time-frequency transform of the data, where n_channels can be
zero- or 1-dimensional.
"""
# Ensure 3-dimensional X
shape = X.shape[1:-1]
if not shape:
X = X[:, np.newaxis, :]
# Compute time-frequency
Xt = _compute_tfr(X, self.freqs, self.sfreq, self.method,
self.n_cycles, True, self.time_bandwidth,
self.use_fft, self.decim, self.output, self.n_jobs,
self.verbose)
# Back to original shape
if not shape:
Xt = Xt[:, 0, :]
return Xt
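# Minimal usage sketch (illustrative; random data stands in for real epochs and the
# frequency/sampling values are arbitrary assumptions):
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   X = rng.standard_normal((10, 4, 256))                 # (n_samples, n_channels, n_times)
#   tf = TimeFrequency(freqs=np.arange(8, 13), sfreq=128.0, output='power')
#   Xt = tf.fit_transform(X)                              # (10, 4, 5, 256)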
|
# Generated by Django 3.2.6 on 2021-08-20 10:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='usersettings',
name='country_of_news',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='usersettings',
name='keywords',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='usersettings',
name='news_source',
field=models.TextField(blank=True, null=True),
),
]
|
'''
RandDist
--------
This minimal package generates a list of int or float numbers within a specific range
and step size, following a custom probability distribution.

It has two methods:

1. randint
2. randfloat

Both take the same parameters and return data in the same format.
'''
from randdist.generate import randint, randfloat
|
from time import sleep
from importlib import import_module
from bloody_hell_bot.core.bot import BloodyHellBot
def main(config_module='config.dev'):
config = import_module(config_module)
bot = BloodyHellBot(config)
bot.message_loop()
while 1:
sleep(10)
if __name__ == '__main__':
main()
|
"""
:Created: 24 August 2016
:Author: Lucas Connors
"""
from django.apps import apps
from django.contrib.auth.models import User
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.test import TransactionTestCase
from pigeon.test import RenderTestCase
from menu.models import Category, MenuItem
from order.models import Order, OrderMenuItem
from restaurant.models import Table
class SearedQuailTestCase(RenderTestCase):
USER_USERNAME = "jsmith"
USER_EMAIL = "jsmith@example.com"
USER_PASSWORD = "abc123"
CATEGORY_NAME = "Drinks"
SUBCATEGORY_NAME = "Soda"
MENU_ITEM_NAME = "Water"
MENU_ITEM_IN_SUBCATEGORY_NAME = "Cola"
TABLE_NUMBER = "A-1"
def setUp(self):
super().setUp()
# Create admin user
self.user = User.objects.create_user(
self.USER_USERNAME, email=self.USER_EMAIL, password=self.USER_PASSWORD
)
self.user.is_staff = True
self.user.is_superuser = True
self.user.save()
self.client.login(username=self.USER_USERNAME, password=self.USER_PASSWORD)
# Create initial instances
self.category = Category.objects.create(name=self.CATEGORY_NAME)
self.menu_item = MenuItem.objects.create(
category=self.category, name=self.MENU_ITEM_NAME
)
self.subcategory = Category.objects.create(
name=self.SUBCATEGORY_NAME, parent=self.category
)
self.menu_item_in_subcategory = MenuItem.objects.create(
category=self.subcategory, name=self.MENU_ITEM_IN_SUBCATEGORY_NAME
)
self.table = Table.objects.create(number=self.TABLE_NUMBER)
# Place an order
self.order = Order.objects.create(table=self.table)
OrderMenuItem.objects.create(order=self.order, menuitem=self.menu_item)
class MigrationTestCase(TransactionTestCase):
"""
Ref: https://www.caktusgroup.com/blog/2016/02/02/writing-unit-tests-django-migrations/
"""
migrate_from = None
migrate_to = None
@property
def app(self):
return apps.get_containing_app_config(type(self).__module__).name
def setUp(self):
# Verify that migration_from and migration_to are defined
assertion_error_message = (
"MigrationTestCase '{test_case_name}' must define migrate_from and migrate_to properties."
).format(test_case_name=type(self).__name__)
assert self.migrate_from and self.migrate_to, assertion_error_message
# Init MigrationExecutor
self.migrate_from = [(self.app, self.migrate_from)]
self.migrate_to = [(self.app, self.migrate_to)]
executor = MigrationExecutor(connection)
old_apps = executor.loader.project_state(self.migrate_from).apps
# Reverse to old migration
executor.migrate(self.migrate_from)
# Create model instances before migration runs
self.setUpBeforeMigration(old_apps)
# Run the migration to test
executor = MigrationExecutor(connection)
executor.migrate(self.migrate_to)
self.apps = executor.loader.project_state(self.migrate_to).apps
def setUpBeforeMigration(self, apps):
pass
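# Hypothetical usage sketch (the app and migration names below are assumptions and do
# not refer to real migrations in this project):
#
#   class ExampleMenuMigrationTestCase(MigrationTestCase):
#       migrate_from = '0001_initial'
#       migrate_to = '0002_example_change'
#
#       def setUpBeforeMigration(self, apps):
#           MenuItem = apps.get_model('menu', 'MenuItem')
#           Category = apps.get_model('menu', 'Category')
#           category = Category.objects.create(name='Drinks')
#           self.item_id = MenuItem.objects.create(category=category, name='Water').id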
class AdminWebTestCase(SearedQuailTestCase):
def get200s(self):
return ["/admin/"]
def testAdminLoginPageRenders(self):
self.client.logout()
self.assertResponseRedirects("/admin/", "/admin/login/")
|
expected = [(True, 0.9749367759562906, 0.0017697497084894365, 0.0001226379059079931, 0.00016402878019849648, 0.00012344246793706903, 0.00031922002005261777, 0.0005362138683486838, 0.0006850762300778668, 0.0004942567639564529, 0.00018708933024663262, 0.0006994743745435789, 0.00040398057664498856, 0.0012545958406222754, 0.0006995602759455857, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 9.258416945554019e-11, 5.856109102466708e-17, 0.67748413597752, 2.3914504100228754e-11, 7.522455917366477e-13, 7.344415091328211e-05, 0.0014396455494463365, 0.0012053384296394612, 0.0008866174046595724, 2.1555330316300734e-14, 0.9594279052184913, 0.0002074703959143888, 7.55481640150461e-05, 0.9999989976196331, 1.207923229416487e-05, 0.00027583827601894315, 0.0698270416107376, 0.00018453781411443678, 0.009863389098877339, 0.022123722654315392, 0.0098633890988774, 0.004592843796987434, 0.9998351236451024, 0.013951132732773557, 0.925405890765279, 0.9755569060880589, 0.14830565476181345, 0.0004521609628475594, 7.534120729066002e-05, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.0021522983926278924, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.020326964585520954, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.6391707315526944, 0.023724068249895314, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.01985439680376681, 0.030449466764316042, 0.21657492863434677, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 1.204930599755324e-11, 2.863050056034425e-09, 0.008253922509688207, 0.35655697923220714, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.03618098130844001, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 2.868092430219349e-10, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 
1.1711026222995016e-13, 0.0, 0.16466886776470133, 0.1669247276390117, 0.0, 5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 4.7035980879892365e-08, 0.16663339666347546, 0.28139584922525007),
(True, 0.9759402110326365, 0.003859470808797959, 0.000365102267782451, 0.0007398589702052536, 0.000485497209632223, 0.000610736173920557, 0.0013191430838316181, 0.0012554726344615142, 0.001882471392918716, 0.0008014864991408377, 0.0021797740913533695, 0.0011754538798584655, 0.0029068132849091356, 0.0016100571186500735, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 0.628584800417054, 5.856109102466708e-17, 0.5855782913710358, 0.5639311333572717, 7.522455917366477e-13, 0.00019175865208437016, 0.2949063498433872, 0.15421138805901285, 0.04201667450090395, 0.1674686336627659, 0.9594279052184913, 0.00048091646393783017, 0.0001822718129598278, 0.9999989976196331, 9.959236613847391e-05, 0.0004905768654966154, 0.13221528111928338, 0.0004543011968957863, 0.009964082422744462, 0.02212145365983826, 0.009964082422744523, 0.004592843796987434, 0.9986661307738313, 0.009344685767824776, 0.9211560286991001, 0.9755593892703659, 0.19089489586703817, 0.001100203819200652, 0.000195353938538486, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.0021522983926278924, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.023154605826211493, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.632758701730094, 0.04357690733133318, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.12338336359658242, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 1.204930599755324e-11, 0.9472168082942851, 0.07252908115375234, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.039526361699395315, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 
0.16466886776470133, 0.1669247276390117, 0.0, 5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 4.7035980879892365e-08, 0.16663339666347546, 0.301803221006645),
(True, 0.9650930535627298, 0.004446882645999163, 0.000779830744354495, 0.0018291583684619643, 0.001156848194089809, 0.0013267424086233672, 0.0036299952076220402, 0.0027191890481778553, 0.0028542417683358585, 0.0012582815526366896, 0.005804597598461859, 0.0030752616287224883, 0.0064325637046985865, 0.0035802530886988467, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 0.1702061839526885, 0.628584800417054, 5.856109102466708e-17, 0.46476604902502333, 0.7772487345236945, 7.522455917366477e-13, 0.00043870678760216363, 0.5620371665742858, 0.35802358026758546, 0.11810551140986231, 0.31694305740570716, 0.9594279052184913, 0.000997592586663745, 0.0003918935200591895, 0.9999989976196331, 0.00044549328223300307, 0.002994722061278165, 0.1397420296811136, 0.0009922143825485518, 0.010060854870684866, 0.022121413589501402, 0.010060854870684931, 0.005169288695657137, 0.9974188774918888, 0.00795596400914535, 0.9123139305661856, 0.9755594331232822, 0.2331496431301822, 0.002330710525648013, 0.0004439356527548031, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.016145845513504845, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.02650960597748037, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.6499506456961532, 0.06740843302484462, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.2657390751934702, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 0.32334960795567924, 2.863050056034425e-09, 0.20731642102779674, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.04287931331076668, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 0.16466886776470133, 
0.1669247276390117, 0.0, 5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 4.7035980879892365e-08, 0.16663339666347546, 0.3773393505694206),
(True, 0.9446546136628688, 0.005119310675270623, 0.0015276758928106622, 0.00392465836781208, 0.0024628949384377947, 0.002496863087055844, 0.007719283133850986, 0.005161120533665423, 0.006306326014843984, 0.0029659593695567903, 0.008208755191274326, 0.0043563268699832215, 0.013937089127215597, 0.007862971477146348, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 0.8489495578328278, 5.856109102466708e-17, 1.0887143007578768e-06, 0.8435295694744586, 7.522455917366477e-13, 0.0009079339332145865, 0.5312223976455256, 0.4024482806188474, 0.2641612329547135, 0.456266555546848, 0.9594279052184913, 0.0019002202202663212, 0.0007711290852290978, 0.9999989976196331, 0.0014224289790193776, 0.011837839257462677, 0.24596355246958002, 0.0019768413472982995, 0.010109863367934919, 0.022452497772528456, 0.010109863367934986, 0.005376156708420288, 0.9980055594610482, 0.00759661067151978, 0.915906194109299, 0.9751971384408027, 0.3109742480528484, 0.004494865540960488, 0.0009129182903651183, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.05205690448505847, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.03051334349281324, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.7245796787382166, 0.06740843302484462, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.40937506641082566, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 0.32334960795567924, 2.863050056034425e-09, 0.35071358560365845, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.04659909357942778, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 0.16466886776470133, 
0.1669247276390117, 0.0, 5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 4.7035980879892365e-08, 0.16663339666347546, 0.44752037155154656),
(True, 0.9148570430415872, 0.005878709943021506, 0.002792905542185768, 0.0075894368538410445, 0.004804630733613945, 0.004393849106506193, 0.014820685016774457, 0.00938744609118805, 0.011649786913846162, 0.005756131370649765, 0.015760775469750833, 0.008475895607575506, 0.018412094949817548, 0.01045916251868392, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 0.8982272708212705, 5.856109102466708e-17, 1.0887143007578768e-06, 0.8435295694744586, 7.522455917366477e-13, 0.0017366618823920316, 0.5063937185064782, 0.44008412539154557, 0.33731163450536594, 0.5722741725809378, 0.9594279052184913, 0.003382784376313171, 0.0014142827979925852, 0.9999989976196331, 0.0036644810806478172, 0.03518713434611009, 0.2900462801767014, 0.0036616728049894826, 0.010139645140688136, 0.022488799892004533, 0.010139645140688205, 0.005396729877744144, 0.9981743932734319, 0.00756251339989579, 0.9170964928541225, 0.9751574194747613, 0.40266237630266705, 0.008074137264439831, 0.001735836076723291, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.11744762389331218, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.03531944981668431, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.7245796787382166, 0.1156448000491021, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.5346551102427208, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 0.32334960795567924, 2.863050056034425e-09, 0.4878677353402995, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.05073274651199229, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 0.16466886776470133, 
0.1669247276390117, 0.0, 5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 4.7035980879892365e-08, 0.16663339666347546, 0.4992269885725577),
(True, 0.8773900422564633, 0.006737683689354504, 0.0048247619101781805, 0.013548449813255039, 0.00873744943462956, 0.007321229981810169, 0.02627235571148601, 0.01639147296275744, 0.02007666541207044, 0.010365829219755766, 0.025961087618306386, 0.014222212896890334, 0.03127417315766773, 0.018052437271065293, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 0.9307858067310805, 5.856109102466708e-17, 1.0887143007578768e-06, 0.8435295694744586, 7.522455917366477e-13, 0.003116819077375695, 0.2949063498433872, 0.32411464409514057, 0.29561449420793584, 0.663568247880646, 0.9594279052184913, 0.005699067374558093, 0.0024494762664879067, 0.9999989976196331, 0.008128639474665522, 0.08444739472522397, 0.4164401045970738, 0.006390743128115311, 0.0101597023577282, 0.02249012253280331, 0.01015970235772827, 0.005397537673694719, 0.9982470873588286, 0.0075611803707368655, 0.9176363519133461, 0.9751559723632889, 0.5031213401534023, 0.013686771604148678, 0.0030982888101555874, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.2142975169478418, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.0411229744187351, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.7884317853270039, 0.1156448000491021, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.6367707804864168, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 0.32334960795567924, 2.863050056034425e-09, 0.6070824318152669, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.055333770115485736, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 0.16466886776470133, 0.1669247276390117, 
0.0, 5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 0.960706684375328, 0.16663339666347546, 0.4648987049382678),
(True, 0.8351433470765426, 0.007706099388131821, 0.007947956066516883, 0.022686947292310716, 0.014991735875121545, 0.01165676885872922, 0.04363307088403046, 0.02749911604495756, 0.032662911515606415, 0.017559065834968416, 0.04026960261153979, 0.02255342469024842, 0.047093618061642833, 0.027604880004625645, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 0.9527866535679592, 5.856109102466708e-17, 1.0887143007578768e-06, 0.8435295694744586, 7.522455917366477e-13, 0.005306407287245952, 0.1643182689263654, 0.2452637246445762, 0.2641612329547135, 0.7336695581715281, 0.9594279052184913, 0.009170044543122991, 0.004045051693865415, 0.9999989976196331, 0.016128763770813574, 0.16933619262867025, 0.45804363114686764, 0.010612373006048977, 0.010174143427333127, 0.02249014468472591, 0.010174143427333195, 0.0053975516451804095, 0.9982852169376433, 0.007561157318746206, 0.9179267374547876, 0.9751559481267165, 0.6045870173147739, 0.022099416719337316, 0.005248596585887298, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.3356692195895122, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.04817195965121822, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.8417287418088772, 0.1156448000491021, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.7172944132100186, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 0.32334960795567924, 2.863050056034425e-09, 0.7046095919319575, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.060462928433970345, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 0.16466886776470133, 0.1669247276390117, 
0.0, 5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 0.999199216932124, 0.16663339666347546, 0.47051853675791894),
(True, 0.7911998190455012, 0.008800649616294641, 0.012570648480435946, 0.0360263772579401, 0.024478322176278763, 0.0178555885816629, 0.06855090610604317, 0.044329234858940546, 0.050578490811933365, 0.028246449024710487, 0.05949049780401471, 0.034139907925263516, 0.06764352461984338, 0.04030307802044964, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 0.9678060848252985, 5.856109102466708e-17, 1.0887143007578768e-06, 0.8435295694744586, 7.522455917366477e-13, 0.008640006016889879, 0.0936889372333889, 0.1307941996207988, 0.17847498111695453, 0.7870936438338828, 0.9594279052184913, 0.014188842444193574, 0.006415745073166158, 0.9999989976196331, 0.029313240791841205, 0.2891738918265846, 0.586653594130873, 0.016887993395513938, 0.010185042847651588, 0.022490144859944602, 0.010185042847651654, 0.005397551757332697, 0.9983077598749016, 0.00756115713370315, 0.9181008652246858, 0.9751559479350059, 0.6987423700245083, 0.03421776780713893, 0.008507855659691374, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.4663102638320793, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.056781709571613034, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.8850064479703426, 0.1156448000491021, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.7797881455384017, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 0.32334960795567924, 2.863050056034425e-09, 0.781215519801578, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.06618910789702505, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 0.16466886776470133, 0.1669247276390117, 0.0, 
5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 0.999984030798407, 0.16663339666347546, 0.4586610020915609),
(True, 0.7480715976509739, 0.010020123249078111, 0.01918745294320575, 0.05466672261311227, 0.038266648952003934, 0.02644755790287131, 0.10252981512083055, 0.06863664111326576, 0.07497062698983498, 0.04344292663361637, 0.08435276828703127, 0.04968226279965248, 0.09341788667508033, 0.05663748902514149, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 0.9780980123097015, 5.856109102466708e-17, 1.0887143007578768e-06, 0.8435295694744586, 7.522455917366477e-13, 0.01353667324995541, 0.05626872891117488, 0.07521129726842302, 0.12665389351705464, 0.8278732746082242, 0.9594279052184913, 0.021221392591123968, 0.009828000700507902, 0.9999989976196331, 0.04954819370276316, 0.42841740464384837, 0.6215317554945545, 0.025891577012630264, 0.010193563530303111, 0.022490144860609185, 0.010193563530303177, 0.005397551757761576, 0.9983222139676136, 0.0075611571329955215, 0.9182134947177039, 0.975155947934278, 0.7792571695927133, 0.051050972845539444, 0.013277956653669779, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.5891256641404885, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.06735166756092135, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.9190928996143982, 0.1156448000491021, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.8279632700575962, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 0.32334960795567924, 2.863050056034425e-09, 0.8397176204588883, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.07259018957341272, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 0.16466886776470133, 0.1669247276390117, 0.0, 
5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 0.9999996825114967, 0.16663339666347546, 0.43104757445818576),
(True, 0.7074277156045184, 0.011389350278344522, 0.028374169579839063, 0.07968495304501298, 0.05752115635026047, 0.03802595393886921, 0.14659756816753064, 0.10201305959998892, 0.10679379407623839, 0.06418588013195548, 0.11541923233364575, 0.06986317260754552, 0.12473537821991337, 0.0770536102709045, 4.074887748553275e-10, 2.1752345554537253e-08, 6.893568743986756e-07, 0.10021777036808066, 0.019674268425336955, 2.509947805151569e-11, 0.9851507964891669, 5.856109102466708e-17, 1.0887143007578768e-06, 0.8435295694744586, 7.522455917366477e-13, 0.020502691481613995, 0.03583189105440813, 0.04670544957106467, 0.07255557607247386, 0.8592031241241455, 0.9594279052184913, 0.030800301501078944, 0.014603503516862845, 0.9999989976196331, 0.07867146842174601, 0.5643305618956167, 0.7358722993322504, 0.03839377129635346, 0.010200408718714183, 0.022490144860609185, 0.010200408718714254, 0.005397551757762353, 0.9983320439869565, 0.007561157132994219, 0.9182905382437727, 0.9751559479342766, 0.8432672673074932, 0.07363903705016071, 0.020045399762039697, 0.06131633436401962, 0.033330412715665114, 9.938586887440123e-22, 0.14249225481045538, 0.06296313323086282, 0.3582069667640977, 4.829929905968561e-15, 0.08375773817691123, 0.018975839977920975, 0.055286669276326375, 5.2299730302677424e-05, 5.695163496355349e-08, 0.6926334011962839, 0.0, 1.142426579532403e-21, 2.4024551296122493e-23, 0.08038395565370364, 0.008721533485877977, 5.761429172990433e-21, 3.608752519347532e-15, 0.14598011853624576, 1.7355671790267907e-22, 1.1809375699556313e-10, 0.05998337856769929, 9.05498589442999e-08, 4.6097836658987035e-10, 0.1570723761512004, 0.9450522527061462, 0.1156448000491021, 0.08076015142621962, 0.02750088405103144, 0.026103288321088658, 0.12445962953209416, 0.07318164957419467, 0.16709687358966352, 0.07558100887358327, 8.786222326940566e-24, 0.8650419623325789, 0.030449466764316042, 0.21106179193952035, 5.301380942484303e-05, 1.426575645079419e-16, 0.037316039578400684, 6.952720170237173e-13, 0.0, 9.796908728381612e-13, 0.0002642813530468513, 0.32334960795567924, 2.863050056034425e-09, 0.8835152300577735, 0.2561373864373436, 2.9948775774335523e-06, 0.2776390123534972, 5.301380942484303e-05, 0.006171383087998217, 5.301380942484303e-05, 0.5, 0.3847104505818606, 5.301380942484303e-05, 5.301380942484303e-05, 0.16466451519846614, 5.301380942484303e-05, 0.9986534460857486, 0.39982063322767364, 0.07975389133191409, 5.708678464956527e-19, 3.323628039453297e-10, 9.641976432207614e-10, 7.105427357601002e-15, 5.837075859767282e-13, 1.1988064218527426e-20, 0.16507954777521547, 0.16704063109164155, 0.16649833422826882, 0.16648681554273437, 0.20286466138783588, 0.06936588086433818, 7.105427357601002e-15, 0.16834648002367886, 0.16798293243083, 6.871898605709248e-10, 0.0011815729111892548, 0.16433263444740043, 0.0008377769174453542, 0.16632573432045866, 0.1630341420539464, 0.16507954777521783, 9.569704921590424e-08, 3.4970892216197174e-08, 0.16820617469834753, 0.16580685767199677, 0.1673465945690054, 7.13964618592744e-07, 2.64115098167329e-12, 0.09991279113071351, 0.9995291135114167, 0.37773784802857724, 0.004506167784075659, 0.13325025138007268, 0.35822936499573854, 1.6148291592799069e-09, 1.875173153042101e-18, 2.0941053890943806e-07, 7.105427357601002e-15, 0.49926428135900186, 0.16492940199937894, 1.3174450829220094e-17, 2.1116435458495668e-16, 1.1681587528983658e-09, 3.2592359986208345e-22, 6.246014200977821e-10, 0.16814918247297933, 0.16545072935045158, 1.1711026222995016e-13, 0.0, 0.16466886776470133, 0.1669247276390117, 0.0, 
5.100713274794273e-08, 7.105427357601002e-15, 0.1546541084142275, 2.794209383371835e-22, 0.0, 0.16763973281840122, 6.314992660308343e-25, 0.16818612974609914, 0.009088502666434949, 0.16836320220350243, 8.265423131146944e-11, 0.15634635438381983, 0.0, 0.0, 0.021135423572131908, 0.021135423572131908, 2.388155749350249e-20, 0.0, 8.336724500779247e-25, 5.301380942484303e-05, 0.1569510658025109, 4.034345030660423e-08, 1.552591960229748e-23, 1.5930611717254445e-17, 5.766100772128885e-14, 2.957989408969297e-11, 0.16837836924209137, 0.167380186090632, 1.4815146472600273e-18, 2.3241499374144116e-16, 0.9999999937047724, 0.16663339666347546, 0.39028355156239286),]
|
"""
This module performs an extrinsic evaluation of files against a specified extraction criterion.
Antoine Orgerit - François Gréau - Lisa Fougeron
La Rochelle Université - 2019-2020
"""
import langid
import json
import copy
import subprocess
from os import listdir, remove
from os.path import isfile, join
from utils.daniel.evaluate import get_results, get_dic
def print_TP_FP_FN_TN(tools_criterias_data):
"""
Outputs TP, FP, FN and TN results of the evaluated files.
"""
print("TOOLS\t\t|TP\t|FP\t|FN\t|TN")
print("------------------------------------------------")
for tool in tools_criterias_data:
if len(tool) > 7:
print(tool + "\t|", end="")
else:
print(tool + "\t\t|", end="")
print(str(tools_criterias_data[tool][0]["TP"]) + "\t|" + str(tools_criterias_data[tool][0]["FP"]) + "\t|" + str(tools_criterias_data[tool][0]["FN"]) + "\t|" + str(tools_criterias_data[tool][0]["TN"]))
print()
def print_FRP(tools_criterias_data, default_header_key):
"""
Outputs F-score, Recall and Precision results of the evaluated files.
"""
print("TOOLS\t\t|\t\tAll\t\t", end="")
add_spacing = []
for criteria in tools_criterias_data[default_header_key][2]:
if len(criteria) >= 24:
print("|" + criteria + "\t", end="")
if len(criteria) >= 31:
add_spacing.append(criteria)
elif len(criteria) >= 16:
print("|\t" + criteria + "\t", end="")
elif len(criteria) >= 8:
print("|\t" + criteria + "\t\t", end="")
else:
print("|\t\t" + criteria + "\t\t", end="")
print()
print("\t\t|\tF\tR\tP\t", end="")
for criteria in tools_criterias_data[default_header_key][2]:
print("|\tF\tR\tP\t", end="")
if criteria in add_spacing:
print("\t", end="")
print()
print("------------------------------------------------", end="")
for criteria in tools_criterias_data[default_header_key][2]:
print("--------------------------------", end="")
if criteria in add_spacing:
print("--------", end="")
print()
for tool in tools_criterias_data:
if len(tool) > 7:
print(tool + "\t", end="")
else:
print(tool + "\t\t", end="")
print("|\t" + str(format(tools_criterias_data[tool][1]["F1-measure"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][1]["Recall"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][1]["Precision"], ".2f")) + "\t", end="")
for criteria in tools_criterias_data[tool][2]:
print("|\t" + str(format(tools_criterias_data[tool][2][criteria]["F1-measure"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][2][criteria]["Recall"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][2][criteria]["Precision"], ".2f")) + "\t", end="")
if criteria in add_spacing:
print("\t", end="")
print()
print()
def detect_language(file_path):
"""
    Detects the language used in a file using the langid module.
"""
    with open(file_path, "r", encoding="utf8") as file:
        language = langid.classify(file.read())
    return language
def delete_unused_files(clean_repository_json_path, files_to_evaluate):
"""
    Removes entries from the JSON file at clean_repository_json_path whose files are not
    present in the JSON object files_to_evaluate, and returns the filtered repository.
"""
clean_repository = json.load(open(clean_repository_json_path, "r", encoding="utf8"))
for id in list(clean_repository):
if not clean_repository[id]["path"] in files_to_evaluate:
clean_repository.pop(id)
return clean_repository
def prepare_json(json_content, path):
"""
    Prepares a JSON object from the clean reference json_content, pointing each entry's
    document_path to the tool-specific files directory path.
"""
prepared_json = {}
for id, infos in json_content.items():
new_infos = copy.copy(infos)
new_infos["document_path"] = path + new_infos["path"]
new_infos["language"] = new_infos["langue"]
new_infos.pop("langue")
prepared_json[id] = new_infos
return prepared_json
def process_corpus():
"""
    Processes the files listed in eval.json using Daniel's process_corpus.py script.
"""
out = subprocess.check_output(['python', '../utils/daniel/process_corpus.py', '-c ../../exo5/eval.json'])
composed_out = out.decode('ascii').split("\r\n")
composed_out = composed_out[len(composed_out) - 2].split("/")
return composed_out[len(composed_out) - 1]
def evaluate(processed_file, criteria_extraction):
"""
    Evaluates the result of the eval.json file against the gold.json reference file
    using Daniel's evaluate.py script.
"""
gold = get_dic('./gold.json')
eval = get_dic('./' + processed_file)
return get_results(gold, eval, criteria_extraction)
def perform_extrinsic_evaluation(clean_repository_path_and_json, source_repositories_name_and_path, criteria_extraction, print_header_key=None):
"""
    Performs an extrinsic evaluation given the reference files path and JSON file clean_repository_path_and_json,
    the files to evaluate grouped by their generator tool in source_repositories_name_and_path, and the extraction
    criterion criteria_extraction.
"""
global_data = {}
for source_repository_name_and_path in source_repositories_name_and_path:
files_to_evaluate = [f for f in listdir(source_repository_name_and_path[1]) if isfile(join(source_repository_name_and_path[1], f))]
clean_repository = delete_unused_files(clean_repository_path_and_json[1], files_to_evaluate)
gold_json = prepare_json(clean_repository, clean_repository_path_and_json[0])
eval_json = prepare_json(clean_repository, source_repository_name_and_path[1])
        with open("./gold.json", "w") as gold_file:
            json.dump(gold_json, gold_file)
        with open("./eval.json", "w") as eval_file:
            json.dump(eval_json, eval_file)
processed_file = process_corpus()
global_data[source_repository_name_and_path[0]] = evaluate(processed_file, criteria_extraction)
remove("./gold.json")
remove("./eval.json")
remove("./test.out")
remove("./tmp")
remove("./" + processed_file)
print_TP_FP_FN_TN(global_data)
    if print_header_key is not None:
print_FRP(global_data, print_header_key)
return global_data
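# Hedged usage sketch (not part of the original module): the repository paths, tool names
# and extraction criterion below are placeholder assumptions and must be adapted to the
# actual corpus layout expected by the Daniel scripts.
if __name__ == "__main__":
    clean_repository_path_and_json = ("../corpus/clean/", "../corpus/clean/repository.json")
    source_repositories_name_and_path = [("tool_a", "../corpus/tool_a/"),
                                         ("tool_b", "../corpus/tool_b/")]
    results = perform_extrinsic_evaluation(clean_repository_path_and_json,
                                           source_repositories_name_and_path,
                                           criteria_extraction="language",
                                           print_header_key="tool_a")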
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 7 22:59:01 2018
@author: jeanfernandes
"""
import pandas as pd
base = pd.read_csv('./bigData/census.csv')
previsores = base.iloc[:, 0:14].values
classe = base.iloc[:, 14].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
encoder_previsores = LabelEncoder()
#labels = encoder_previsores.fit_transform(previsores[:, 1])
previsores[:, 1] = encoder_previsores.fit_transform(previsores[:, 1])
previsores[:, 3] = encoder_previsores.fit_transform(previsores[:, 3])
previsores[:, 5] = encoder_previsores.fit_transform(previsores[:, 5])
previsores[:, 6] = encoder_previsores.fit_transform(previsores[:, 6])
previsores[:, 7] = encoder_previsores.fit_transform(previsores[:, 7])
previsores[:, 8] = encoder_previsores.fit_transform(previsores[:, 8])
previsores[:, 9] = encoder_previsores.fit_transform(previsores[:, 9])
previsores[:, 13] = encoder_previsores.fit_transform(previsores[:, 13])
# Note: the categorical_features argument only exists in scikit-learn < 0.22
# (see the ColumnTransformer sketch at the end of this script for a modern equivalent)
onehotencoder = OneHotEncoder(categorical_features = [1, 3, 5, 6, 7, 8, 9, 13])
previsores = onehotencoder.fit_transform(previsores).toarray()
labelencoder_classe = LabelEncoder()
classe = labelencoder_classe.fit_transform(classe)
# feature scaling (standardization)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
previsores = scaler.fit_transform(previsores)
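# Hedged alternative sketch (not part of the original script): categorical_features was
# removed from OneHotEncoder in scikit-learn 0.22+, so on recent versions the same one-hot
# step is usually expressed with a ColumnTransformer applied directly to the raw columns,
# which also makes the per-column LabelEncoder passes unnecessary. The transformer below is
# only constructed, not fitted, so the pipeline above is left untouched.
from sklearn.compose import ColumnTransformer
onehot_alternative = ColumnTransformer(
    [('onehot', OneHotEncoder(), [1, 3, 5, 6, 7, 8, 9, 13])],
    remainder='passthrough')
# previsores_alt = onehot_alternative.fit_transform(base.iloc[:, 0:14].values)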
|
import getpass
import json
import os
from xdg import XDG_CONFIG_HOME
PROGRAMAKER_BRIDGE_ENDPOINT_ENV = 'PLAZA_BRIDGE_ENDPOINT'
PROGRAMAKER_AUTH_TOKEN_ENV = 'PLAZA_BRIDGE_AUTH_TOKEN'
PROGRAMAKER_BRIDGE_ENDPOINT_INDEX = "plaza_bridge_endpoint"
PROGRAMAKER_AUTH_TOKEN_INDEX = 'plaza_authentication_token'
global directory, config_file
directory = os.path.join(XDG_CONFIG_HOME, "plaza", "bridges", "gitlab")
config_file = os.path.join(directory, "config.json")
def _get_config():
if not os.path.exists(config_file):
return {}
with open(config_file, "rt") as f:
return json.load(f)
def _save_config(config):
os.makedirs(directory, exist_ok=True)
with open(config_file, "wt") as f:
return json.dump(config, f)
def get_bridge_endpoint():
# Check if the bridge endpoint is defined in an environment variable
programaker_bridge_endpoint_env = os.getenv(PROGRAMAKER_BRIDGE_ENDPOINT_ENV, None)
if programaker_bridge_endpoint_env is not None:
return programaker_bridge_endpoint_env
# If not, request it and save it to a file
config = _get_config()
if config.get(PROGRAMAKER_BRIDGE_ENDPOINT_INDEX, None) is None:
config[PROGRAMAKER_BRIDGE_ENDPOINT_INDEX] = input("Programaker bridge endpoint: ")
if not config[PROGRAMAKER_BRIDGE_ENDPOINT_INDEX]:
raise Exception("No bridge endpoint introduced")
_save_config(config)
return config[PROGRAMAKER_BRIDGE_ENDPOINT_INDEX]
def get_auth_token():
env_val = os.getenv(PROGRAMAKER_AUTH_TOKEN_ENV, None)
if env_val is not None:
return env_val
config = _get_config()
if config.get(PROGRAMAKER_AUTH_TOKEN_INDEX, None) is None:
config[PROGRAMAKER_AUTH_TOKEN_INDEX] = input('Programaker authentication TOKEN: ')
if not config[PROGRAMAKER_AUTH_TOKEN_INDEX]:
raise Exception('No authentication token introduced')
_save_config(config)
return config[PROGRAMAKER_AUTH_TOKEN_INDEX]
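# Hedged usage sketch (not part of the original module): a bridge start-up script would
# typically resolve both values once; if the environment variables are unset, the helpers
# prompt interactively and cache the answers in the XDG config file defined above.
if __name__ == "__main__":
    endpoint = get_bridge_endpoint()
    token = get_auth_token()
    print("Bridge endpoint:", endpoint)
    print("Auth token loaded:", bool(token))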
|
import pytest
from mlagents.plugins.stats_writer import register_stats_writer_plugins
from mlagents.trainers.settings import RunOptions
from mlagents_plugin_examples.example_stats_writer import ExampleStatsWriter
@pytest.mark.check_environment_trains
def test_register_stats_writers():
# Make sure that the ExampleStatsWriter gets returned from the list of all StatsWriters
stats_writers = register_stats_writer_plugins(RunOptions())
assert any(isinstance(sw, ExampleStatsWriter) for sw in stats_writers)
|
#!/usr/bin/env python
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys  # sys.stdin is used when reading a model from standard input
from sys import argv,exit
import optparse
from time import sleep
from tema.validator.modelvalidator import *
from tema.model import getModelType,loadModel
def validateModel(modelName,modelFormat,modelType):
if not modelFormat:
modelFormat = getModelType(modelName)
if modelType:
modelType = eval("ModelValidator.%s" % modelType.upper())
else:
modelFormats = [ "mdm","lsts" ]
for format in modelFormats:
if modelName.endswith(".refined.%s" % format ):
modelType = ModelValidator.REFINED_MACHINE
elif modelName.endswith("-im.%s" % format ):
modelType = ModelValidator.INITIALIZATION_MACHINE
elif modelName.endswith("-lm.%s" % format ):
modelType = ModelValidator.LAUNCH_MACHINE
elif modelName.endswith("-rm.%s" % format ):
modelType = ModelValidator.REFINEMENT_MACHINE
elif modelName.endswith(".%s" % format ):
modelType = ModelValidator.ACTION_MACHINE
elif modelName.endswith(".ext") or modelName.endswith(".parallellsts") or modelName.endswith(".parallel"):
modelType = ModelValidator.COMPOSED_MACHINE
if modelType is not None:
break
else:
print 'File %s is in unknown format' % modelName
print ''
return
model = None
if modelName == "-":
modelFile = sys.stdin
else:
modelFile = open(modelName)
try:
model = loadModel(modelFormat,modelFile)
finally:
if modelName != "-":
modelFile.close()
errors = []
warnings = []
validator = ModelValidator(model)
lock = validator.beginValidation(modelType, errors, warnings)
if lock == None:
print 'Model %s is of an unknown type.' % modelName
print ''
return
while True:
sleep(0.1)
if not lock.locked():
break
pruneErrors(errors)
pruneErrors(warnings)
for i in errors + warnings:
for j in i[1]:
if j[:4] == 'path' and i[1][j] != None:
i[1][j] = [str(k) for k in i[1][j]]
if (len(errors) == 0 and len(warnings) == 0):
print 'Model %s is valid.' % modelName
else:
if (len(errors) > 0):
print 'Errors in model %s:\n' % modelName +\
str([(i[0], ModelValidator.defaultErrorMessages[i[0]] % i[1]) for i in errors])
if (len(warnings) > 0):
print 'Warnings in model %s:\n' % modelName +\
str([(i[0], ModelValidator.defaultErrorMessages[i[0]] % i[1]) for i in warnings])
print ''
def readArgs():
usagemessage = "usage: %prog [options] [filenames]"
description = "If no filenames are given or filename is -, reads from standard input"
parser = optparse.OptionParser(usage=usagemessage,description=description)
parser.add_option("-f", "--format", action="store", type="str",
help="Format of the model file")
parser.add_option("-t", "--type", action="store", type="str",
help="Type of the model")
options, args = parser.parse_args(argv[1:])
if len(args) == 0:
args.append("-")
elif "-" in args and len(args) > 1:
parser.error("Can't read from stdin and from files at the same time")
if not options.format and "-" in args:
parser.error("Reading from standard input requires format parameter")
if options.type and options.type.upper() not in ["REFINED_MACHINE","INITIALIZATION_MACHINE","LAUNCH_MACHINE","REFINEMENT_MACHINE","ACTION_MACHINE","COMPOSED_MACHINE"]:
parser.error("Unknown model type '%s'" % options.type)
return args,options
def main():
args,options = readArgs()
print ''
for filename in args:
validateModel(filename,options.format,options.type)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
exit(1)
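# Hedged usage note (not part of the original script); assuming the file is saved as
# validatemodel.py, typical invocations would look like:
#   python validatemodel.py mymodel-rm.lsts othermodel.refined.mdm
#   cat composed.parallellsts | python validatemodel.py -f parallellsts -t composed_machine -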
|
import numba
from numba import cuda, float32, int32
import numpy as np
import math
import cmath
@cuda.jit(device=True)
def euclidean_distance(x1, y1, z1, x2, y2, z2):
square_distance = (x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2
distance = math.sqrt(square_distance)
return distance
@cuda.jit(device=True)
def square_euclidean_distance(x1, y1, z1, x2, y2, z2):
square_distance = (x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2
return square_distance
@cuda.jit(device=True)
def warp_point_with_nodes(node_positions, nodes_rotation, nodes_translation, pos_x, pos_y, pos_z):
    # work on the point relative to the node position
    rel_x = pos_x - node_positions[0]
    rel_y = pos_y - node_positions[1]
    rel_z = pos_z - node_positions[2]
    # apply the node rotation using the original relative coordinates in every row,
    # so an already-rotated component is never reused while computing the next one
    now_x = nodes_rotation[0, 0] * rel_x + \
        nodes_rotation[0, 1] * rel_y + \
        nodes_rotation[0, 2] * rel_z
    now_y = nodes_rotation[1, 0] * rel_x + \
        nodes_rotation[1, 1] * rel_y + \
        nodes_rotation[1, 2] * rel_z
    now_z = nodes_rotation[2, 0] * rel_x + \
        nodes_rotation[2, 1] * rel_y + \
        nodes_rotation[2, 2] * rel_z
    # translate back to world coordinates and apply the node translation
    now_x = now_x + node_positions[0] + nodes_translation[0]
    now_y = now_y + node_positions[1] + nodes_translation[1]
    now_z = now_z + node_positions[2] + nodes_translation[2]
    return now_x, now_y, now_z
@cuda.jit(device=True)
def warp_normal_with_nodes(nodes_rotation, normal_x, normal_y, normal_z):
now_x = nodes_rotation[0, 0] * normal_x + \
nodes_rotation[0, 1] * normal_y + \
nodes_rotation[0, 2] * normal_z
now_y = nodes_rotation[1, 0] * normal_x + \
nodes_rotation[1, 1] * normal_y + \
nodes_rotation[1, 2] * normal_z
now_z = nodes_rotation[2, 0] * normal_x + \
nodes_rotation[2, 1] * normal_y + \
nodes_rotation[2, 2] * normal_z
return now_x, now_y, now_z
@cuda.jit(device=True)
def tsdf_bilinear_sample(data_volume, pos_x, pos_y, pos_z):
x_up = int(math.ceil(pos_x))
x_low = int(math.floor(pos_x))
y_up = int(math.ceil(pos_y))
y_low = int(math.floor(pos_y))
z_up = int(math.ceil(pos_z))
z_low = int(math.floor(pos_z))
a_x = pos_x - x_low
a_y = pos_y - y_low
a_z = pos_z - z_low
bilinear_sampled_tsdf = 0.0
bilinear_sampled_weigth = 0.0
weight_sum = 0.0
valid_count = 0
if data_volume[x_low, y_low, z_low, 1] > 0:
weight_sum += (a_x) * (a_y) * (a_z)
valid_count += 1
if data_volume[x_up, y_low, z_low, 1] > 0:
weight_sum += (1 - a_x) * (a_y) * (a_z)
valid_count += 1
if data_volume[x_low, y_up, z_low, 1] > 0:
weight_sum += (a_x) * (1 - a_y) * (a_z)
valid_count += 1
if data_volume[x_low, y_low, z_up, 1] > 0:
weight_sum += (a_x) * (a_y) * (1 - a_z)
valid_count += 1
if data_volume[x_up, y_up, z_low, 1] > 0:
weight_sum += (1 - a_x) * (1 - a_y) * (a_z)
valid_count += 1
if data_volume[x_low, y_up, z_up, 1] > 0:
weight_sum += (a_x) * (1 - a_y) * (1 - a_z)
valid_count += 1
if data_volume[x_up, y_low, z_up, 1] > 0:
weight_sum += (1 - a_x) * (a_y) * (1 - a_z)
valid_count += 1
if data_volume[x_up, y_up, z_up, 1] > 0:
weight_sum += (1 - a_x) * (1 - a_y) * (1 - a_z)
valid_count += 1
if weight_sum > 0 and valid_count > 4:
        # accumulate each corner's TSDF value and weight with its own corner index
        # and its own trilinear factor
        if data_volume[x_low, y_low, z_low, 1] > 0:
            bilinear_sampled_tsdf += data_volume[x_low, y_low, z_low, 0] * (a_x) * (a_y) * (a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_low, y_low, z_low, 1] * (a_x) * (a_y) * (a_z) / weight_sum
        if data_volume[x_up, y_low, z_low, 1] > 0:
            bilinear_sampled_tsdf += data_volume[x_up, y_low, z_low, 0] * (1 - a_x) * (a_y) * (a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_up, y_low, z_low, 1] * (1 - a_x) * (a_y) * (a_z) / weight_sum
        if data_volume[x_low, y_up, z_low, 1] > 0:
            bilinear_sampled_tsdf += data_volume[x_low, y_up, z_low, 0] * (a_x) * (1 - a_y) * (a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_low, y_up, z_low, 1] * (a_x) * (1 - a_y) * (a_z) / weight_sum
        if data_volume[x_low, y_low, z_up, 1] > 0:
            bilinear_sampled_tsdf += data_volume[x_low, y_low, z_up, 0] * (a_x) * (a_y) * (1 - a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_low, y_low, z_up, 1] * (a_x) * (a_y) * (1 - a_z) / weight_sum
        if data_volume[x_up, y_up, z_low, 1] > 0:
            bilinear_sampled_tsdf += data_volume[x_up, y_up, z_low, 0] * (1 - a_x) * (1 - a_y) * (a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_up, y_up, z_low, 1] * (1 - a_x) * (1 - a_y) * (a_z) / weight_sum
        if data_volume[x_low, y_up, z_up, 1] > 0:
            bilinear_sampled_tsdf += data_volume[x_low, y_up, z_up, 0] * (a_x) * (1 - a_y) * (1 - a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_low, y_up, z_up, 1] * (a_x) * (1 - a_y) * (1 - a_z) / weight_sum
        if data_volume[x_up, y_low, z_up, 1] > 0:
            bilinear_sampled_tsdf += data_volume[x_up, y_low, z_up, 0] * (1 - a_x) * (a_y) * (1 - a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_up, y_low, z_up, 1] * (1 - a_x) * (a_y) * (1 - a_z) / weight_sum
        if data_volume[x_up, y_up, z_up, 1] > 0:
            bilinear_sampled_tsdf += data_volume[x_up, y_up, z_up, 0] * (1 - a_x) * (1 - a_y) * (1 - a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_up, y_up, z_up, 1] * (1 - a_x) * (1 - a_y) * (1 - a_z) / weight_sum
return bilinear_sampled_tsdf, bilinear_sampled_weigth
else:
return 32767, 0
@cuda.jit(device=True)
def tsdf_bounded_sample(data_volume, pos_x, pos_y, pos_z, min_tsdf):
yta = 3
x_up = int(math.ceil(pos_x))
x_low = int(math.floor(pos_x))
y_up = int(math.ceil(pos_y))
y_low = int(math.floor(pos_y))
z_up = int(math.ceil(pos_z))
z_low = int(math.floor(pos_z))
a_x = pos_x - x_low
a_y = pos_y - y_low
a_z = pos_z - z_low
bilinear_sampled_tsdf = 0.0
bilinear_sampled_weigth = 0.0
weight_sum = 0.0
valid_count = 0
if abs(data_volume[x_low, y_low, z_low, 0] - min_tsdf) < yta:
weight_sum += (a_x) * (a_y) * (a_z)
valid_count += 1
if abs(data_volume[x_up, y_low, z_low, 0] - min_tsdf) < yta:
weight_sum += (1 - a_x) * (a_y) * (a_z)
valid_count += 1
if abs(data_volume[x_low, y_up, z_low, 0] - min_tsdf) < yta:
weight_sum += (a_x) * (1 - a_y) * (a_z)
valid_count += 1
if abs(data_volume[x_low, y_low, z_up, 0] - min_tsdf) < yta:
weight_sum += (a_x) * (a_y) * (1 - a_z)
valid_count += 1
if abs(data_volume[x_up, y_up, z_low, 0] - min_tsdf) < yta:
weight_sum += (1 - a_x) * (1 - a_y) * (a_z)
valid_count += 1
if abs(data_volume[x_low, y_up, z_up, 0] - min_tsdf) < yta:
weight_sum += (a_x) * (1 - a_y) * (1 - a_z)
valid_count += 1
if abs(data_volume[x_up, y_low, z_up, 0] - min_tsdf) < yta:
weight_sum += (1 - a_x) * (a_y) * (1 - a_z)
valid_count += 1
if abs(data_volume[x_up, y_up, z_up, 0] - min_tsdf) < yta:
weight_sum += (1 - a_x) * (1 - a_y) * (1 - a_z)
valid_count += 1
if valid_count > 0 and weight_sum > 0:
        # accumulate each bounded corner's TSDF value and weight with its own corner index
        # and its own trilinear factor
        if abs(data_volume[x_low, y_low, z_low, 0] - min_tsdf) < yta:
            bilinear_sampled_tsdf += data_volume[x_low, y_low, z_low, 0] * (a_x) * (a_y) * (a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_low, y_low, z_low, 1] * (a_x) * (a_y) * (a_z) / weight_sum
        if abs(data_volume[x_up, y_low, z_low, 0] - min_tsdf) < yta:
            bilinear_sampled_tsdf += data_volume[x_up, y_low, z_low, 0] * (1 - a_x) * (a_y) * (a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_up, y_low, z_low, 1] * (1 - a_x) * (a_y) * (a_z) / weight_sum
        if abs(data_volume[x_low, y_up, z_low, 0] - min_tsdf) < yta:
            bilinear_sampled_tsdf += data_volume[x_low, y_up, z_low, 0] * (a_x) * (1 - a_y) * (a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_low, y_up, z_low, 1] * (a_x) * (1 - a_y) * (a_z) / weight_sum
        if abs(data_volume[x_low, y_low, z_up, 0] - min_tsdf) < yta:
            bilinear_sampled_tsdf += data_volume[x_low, y_low, z_up, 0] * (a_x) * (a_y) * (1 - a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_low, y_low, z_up, 1] * (a_x) * (a_y) * (1 - a_z) / weight_sum
        if abs(data_volume[x_up, y_up, z_low, 0] - min_tsdf) < yta:
            bilinear_sampled_tsdf += data_volume[x_up, y_up, z_low, 0] * (1 - a_x) * (1 - a_y) * (a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_up, y_up, z_low, 1] * (1 - a_x) * (1 - a_y) * (a_z) / weight_sum
        if abs(data_volume[x_low, y_up, z_up, 0] - min_tsdf) < yta:
            bilinear_sampled_tsdf += data_volume[x_low, y_up, z_up, 0] * (a_x) * (1 - a_y) * (1 - a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_low, y_up, z_up, 1] * (a_x) * (1 - a_y) * (1 - a_z) / weight_sum
        if abs(data_volume[x_up, y_low, z_up, 0] - min_tsdf) < yta:
            bilinear_sampled_tsdf += data_volume[x_up, y_low, z_up, 0] * (1 - a_x) * (a_y) * (1 - a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_up, y_low, z_up, 1] * (1 - a_x) * (a_y) * (1 - a_z) / weight_sum
        if abs(data_volume[x_up, y_up, z_up, 0] - min_tsdf) < yta:
            bilinear_sampled_tsdf += data_volume[x_up, y_up, z_up, 0] * (1 - a_x) * (1 - a_y) * (1 - a_z) / weight_sum
            bilinear_sampled_weigth += data_volume[x_up, y_up, z_up, 1] * (1 - a_x) * (1 - a_y) * (1 - a_z) / weight_sum
return bilinear_sampled_tsdf, bilinear_sampled_weigth
else:
return 32767, 0
@cuda.jit(device=True)
def tsdf_nearest_sample(data_volume, pos_x, pos_y, pos_z):
x = int(round(pos_x))
y = int(round(pos_y))
z = int(round(pos_z))
X_SIZE, Y_SIZE, Z_SIZE = data_volume.shape[:3]
if x >= X_SIZE - 1 or y >= Y_SIZE - 1 or z >= Z_SIZE - 1:
return 32767, 0
else:
return data_volume[x, y, z, 0], data_volume[x, y, z, 1]
@cuda.jit(device=True)
def tsdf_smallest_tsdf(data_volume, pos_x, pos_y, pos_z):
min_tsdf = math.inf
x_up = int(math.ceil(pos_x))
x_low = int(math.floor(pos_x))
y_up = int(math.ceil(pos_y))
y_low = int(math.floor(pos_y))
z_up = int(math.ceil(pos_z))
z_low = int(math.floor(pos_z))
min_tsdf = min(min_tsdf, data_volume[x_low, y_low, z_low, 0])
min_tsdf = min(min_tsdf, data_volume[x_up, y_low, z_low, 0])
min_tsdf = min(min_tsdf, data_volume[x_low, y_up, z_low, 0])
min_tsdf = min(min_tsdf, data_volume[x_low, y_low, z_up, 0])
min_tsdf = min(min_tsdf, data_volume[x_up, y_up, z_low, 0])
min_tsdf = min(min_tsdf, data_volume[x_up, y_low, z_up, 0])
min_tsdf = min(min_tsdf, data_volume[x_low, y_up, z_up, 0])
min_tsdf = min(min_tsdf, data_volume[x_up, y_up, z_up, 0])
return min_tsdf
@cuda.jit(device=True)
def tsdf_gradient_corrected_smaple(ref_volume, data_volume, volume_gradient, x, y, z,
deoformed_vol_x, deoformed_vol_y, deoformed_vol_z):
grad_x = volume_gradient[x, y, z, 0]
grad_y = volume_gradient[x, y, z, 1]
grad_z = volume_gradient[x, y, z, 2]
ref_tsdf = ref_volume[x, y, z, 0]
ref_weight = ref_volume[x, y, z, 1]
@cuda.jit(device=True)
def cross(x, y, z, x_, y_, z_):
new_x = y * z_ - z * y_
new_y = z * x_ - x * z_
new_z = x * y_ - y * x_
return new_x, new_y, new_z
@cuda.jit(device=True)
def dot(x, y, z, x_, y_, z_):
s = x * x_ + y * y_ + z * z_
return s
@cuda.jit(device=True)
def norm(x, y, z):
return math.sqrt(x * x + y * y + z * z)
@cuda.jit(device=True)
def normalize(x, y, z):
s = math.sqrt(x * x + y * y + z * z)
return x / s, y / s, z / s
@cuda.jit(device=True)
def norm_quaternion(quaternion):
return math.sqrt(quaternion[0] * quaternion[0] +
quaternion[1] * quaternion[1] +
quaternion[2] * quaternion[2] +
quaternion[3] * quaternion[3])
@cuda.jit(device=True)
def square_norm_quaternion(quaternion):
return quaternion[0] * quaternion[0] + quaternion[1] * quaternion[1] + \
quaternion[2] * quaternion[2] + quaternion[3] * quaternion[3]
# region ================= vec3 =================================
@cuda.jit(device=True)
def vec3_cross(vec3_out, vec3_1, vec3_2):
vec3_out[0] = vec3_1[1] * vec3_2[2] - vec3_1[2] * vec3_2[1]
vec3_out[1] = vec3_1[2] * vec3_2[0] - vec3_1[0] * vec3_2[2]
vec3_out[2] = vec3_1[0] * vec3_2[1] - vec3_1[1] * vec3_2[0]
@cuda.jit(device=True)
def vec3_dot(vec3_1, vec3_2):
return vec3_1[0] * vec3_2[0] + vec3_1[1] * vec3_2[1] + vec3_1[2] * vec3_2[2]
@cuda.jit(device=True)
def vec3_elementwise_add(vec3_out, vec3_in):
vec3_out[0] = vec3_out[0] + vec3_in[0]
vec3_out[1] = vec3_out[1] + vec3_in[1]
vec3_out[2] = vec3_out[2] + vec3_in[2]
@cuda.jit(device=True)
def vec3_elementwise_subtract(vec3_out, vec3_in):
vec3_out[0] = vec3_out[0] - vec3_in[0]
vec3_out[1] = vec3_out[1] - vec3_in[1]
vec3_out[2] = vec3_out[2] - vec3_in[2]
@cuda.jit(device=True)
def vec3_elementwise_add_factor(vec3_out, vec3_in, factor):
vec3_out[0] = vec3_out[0] + vec3_in[0] * factor
vec3_out[1] = vec3_out[1] + vec3_in[1] * factor
vec3_out[2] = vec3_out[2] + vec3_in[2] * factor
@cuda.jit(device=True)
def mat3_vec3_inner_product(vec3_out, mat3_in, vec3_in):
vec3_out[0] = mat3_in[0, 0] * vec3_in[0] + mat3_in[0, 1] * vec3_in[1] + mat3_in[0, 2] * vec3_in[2]
vec3_out[1] = mat3_in[1, 0] * vec3_in[0] + mat3_in[1, 1] * vec3_in[1] + mat3_in[1, 2] * vec3_in[2]
vec3_out[2] = mat3_in[2, 0] * vec3_in[0] + mat3_in[2, 1] * vec3_in[1] + mat3_in[2, 2] * vec3_in[2]
# endregion
# region ================= vec4 =================================
@cuda.jit(device=True)
def vec4_elementwise_sub_factor(vec4_out, vec4_in, factor):
vec4_out[0] = vec4_out[0] - vec4_in[0] * factor
vec4_out[1] = vec4_out[1] - vec4_in[1] * factor
vec4_out[2] = vec4_out[2] - vec4_in[2] * factor
vec4_out[3] = vec4_out[3] - vec4_in[3] * factor
@cuda.jit(device=True)
def vec4_dot(a, b):
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]
@cuda.jit(device=True)
def vec4_elementwise_mul(vec4_out, vec4_1, vect4_2):
vec4_out[0] = vec4_1[0] * vect4_2[0]
vec4_out[1] = vec4_1[1] * vect4_2[1]
vec4_out[2] = vec4_1[2] * vect4_2[2]
vec4_out[3] = vec4_1[3] * vect4_2[3]
@cuda.jit(device=True)
def vec4_elementwise_mul_factor(vec4_out, vec4_1, vec4_2, factor):
vec4_out[0] = vec4_1[0] * vec4_2[0] * factor
vec4_out[1] = vec4_1[1] * vec4_2[1] * factor
vec4_out[2] = vec4_1[2] * vec4_2[2] * factor
vec4_out[3] = vec4_1[3] * vec4_2[3] * factor
@cuda.jit(device=True)
def vec4_elementwise_add(vec4_out, vec4_1, vec4_2):
vec4_out[0] = vec4_1[0] + vec4_2[0]
vec4_out[1] = vec4_1[1] + vec4_2[1]
vec4_out[2] = vec4_1[2] + vec4_2[2]
vec4_out[3] = vec4_1[3] + vec4_2[3]
# endregion
# region ================= vec (arbitrary length) =================================
@cuda.jit(device=True)
def vec_elementwise_add_factor(vec_out, vec_in, factor):
for i_element in range(vec_out.shape[0]):
vec_out[i_element] = vec_out[i_element] + vec_in[i_element] * factor
@cuda.jit(device=True)
def vec_elementwise_add(result, a, b):
for i_element in range(result.shape[0]):
result[i_element] = a[i_element] + b[i_element]
@cuda.jit(device=True)
def vec_mul_factor(vec_out, factor):
for i_element in range(vec_out.shape[0]):
vec_out[i_element] = vec_out[i_element] * factor
@cuda.jit(device=True)
def normalize_dual_quaternion(dual_quaternion):
real = dual_quaternion[:4]
dual = dual_quaternion[4:]
length = norm_quaternion(real)
squared_length = length * length
# make real part have unit length
for i_real in range(4):
real[i_real] = real[i_real] / length
# make dual part have unit length & orthogonal to real
for i_dual in range(4):
dual[i_dual] = dual[i_dual] / length
dual_delta = vec4_dot(real, dual) * squared_length
vec4_elementwise_sub_factor(dual, real, dual_delta)
# endregion
# region ================= dual_quaternions =================================
@cuda.jit(device=True)
def linearly_blend_dual_quaternions(final_dual_quaternion, dual_quaternions, anchors, weights, workload_index):
# initialize
for i_element in range(8):
final_dual_quaternion[i_element] = 0.0
# add up weighted coefficients
for i_anchor in range(anchors.shape[1]):
anchor = anchors[workload_index, i_anchor]
if anchor != -1:
weight = weights[workload_index, i_anchor]
dual_quaternion = dual_quaternions[anchor]
vec_elementwise_add_factor(final_dual_quaternion, dual_quaternion, weight)
normalize_dual_quaternion(final_dual_quaternion)
return final_dual_quaternion
@cuda.jit(device=True)
def quaternion_product(q_out, q1, q2):
q_out[0] = -q1[1] * q2[1] - q1[2] * q2[2] - q1[3] * q2[3] + q1[0] * q2[0]
q_out[1] = q1[1] * q2[0] + q1[2] * q2[3] - q1[3] * q2[2] + q1[0] * q2[1]
q_out[2] = -q1[1] * q2[3] + q1[2] * q2[0] + q1[3] * q2[1] + q1[0] * q2[2]
q_out[3] = q1[1] * q2[2] - q1[2] * q2[1] + q1[3] * q2[0] + q1[0] * q2[3]
@cuda.jit(device=True)
def dual_quaternion_product(dq_out, dq1, dq2):
"""
Compute product of two dual quaternions (https://github.com/neka-nat/dq3d/blob/master/dq3d/DualQuaternion.h)
Note that dq_out cannot be the same as dq1 or dq2
:param dq_out:
:param dq1:
:param dq2:
:return:
"""
dq1_real = dq1[:4]
dq1_dual = dq1[4:]
dq2_real = dq2[:4]
dq2_dual = dq2[4:]
dq_out_real = dq_out[:4]
dq_out_dual = dq_out[4:]
quaternion_product(dq_out_dual, dq1_real, dq2_dual)
# use dq_out_real as temporary value holder for dq_out_dual
quaternion_product(dq_out_real, dq1_dual, dq2_real)
vec4_elementwise_add(dq_out_dual, dq_out_dual, dq_out_real)
quaternion_product(dq_out_real, dq1_real, dq2_real)
@cuda.jit(device=True)
def dual_quaternion_conjugate(dq_out, dq_in):
dq_out[0] = dq_in[0]
dq_out[1] = -dq_in[1]
dq_out[2] = -dq_in[2]
dq_out[3] = -dq_in[3]
dq_out[4] = dq_in[4]
dq_out[5] = -dq_in[5]
dq_out[6] = -dq_in[6]
dq_out[7] = -dq_in[7]
@cuda.jit(device=True)
def transform_point_by_dual_quaternion(point_out, dual_quaternion,
temp_dual_quaternion_1,
temp_dual_quaternion_2,
temp_dual_quaternion_3,
point):
temp_dual_quaternion_1[0] = 1.0
temp_dual_quaternion_1[1] = 0.0
temp_dual_quaternion_1[2] = 0.0
temp_dual_quaternion_1[3] = 0.0
temp_dual_quaternion_1[4] = 0.0
temp_dual_quaternion_1[5] = point[0]
temp_dual_quaternion_1[6] = point[1]
temp_dual_quaternion_1[7] = point[2]
dual_quaternion_product(temp_dual_quaternion_2, dual_quaternion, temp_dual_quaternion_1)
dual_quaternion_conjugate(temp_dual_quaternion_1, dual_quaternion)
dual_quaternion_product(temp_dual_quaternion_3, temp_dual_quaternion_2, temp_dual_quaternion_1)
point_out[0] = temp_dual_quaternion_3[5]
point_out[1] = temp_dual_quaternion_3[6]
point_out[2] = temp_dual_quaternion_3[7]
# translation
dq_real_w = dual_quaternion[0]
dq_real_vec = dual_quaternion[1:4]
dq_dual_w = dual_quaternion[4]
dq_dual_vec = dual_quaternion[5:]
cross_real_dual_vecs = temp_dual_quaternion_1[:3]
vec3_cross(cross_real_dual_vecs, dq_real_vec, dq_dual_vec)
added_vec = temp_dual_quaternion_2[:3]
added_vec[0] = dq_dual_vec[0] * dq_real_w
added_vec[1] = dq_dual_vec[1] * dq_real_w
added_vec[2] = dq_dual_vec[2] * dq_real_w
vec3_elementwise_add_factor(added_vec, dq_real_vec, -dq_dual_w)
vec3_elementwise_add(added_vec, cross_real_dual_vecs)
vec3_elementwise_add_factor(point_out, added_vec, 2.0)
# endregion
# region ==================== matrix blending ==========================================================================
@cuda.jit(device=True)
def linearly_blend_matrices(warped_point, temp1, temp2, source_point, nodes, node_translations, node_rotations, anchors, weights, workload_index):
# initialize
for i_element in range(3):
warped_point[i_element] = 0.0
# add up node influences
for i_anchor in range(anchors.shape[1]):
anchor = anchors[workload_index, i_anchor]
if anchor != -1:
weight = weights[workload_index, i_anchor]
node = nodes[anchor] # vector3
node_rotation = node_rotations[anchor] # matrix 3x3
node_translation = node_translations[anchor] # vector3
temp1[0] = source_point[0]
temp1[1] = source_point[1]
temp1[2] = source_point[2]
vec3_elementwise_subtract(temp1, node)
mat3_vec3_inner_product(temp2, node_rotation, temp1)
vec3_elementwise_add(temp2, node)
vec3_elementwise_add(temp2, node_translation)
vec3_elementwise_add_factor(warped_point, temp2, weight)
return warped_point
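# A minimal usage sketch (not part of the original module): a kernel that warps each source
# point with linearly_blend_matrices. The kernel name, array layouts and the float32 dtype are
# assumptions; `cuda` is assumed to be imported at the top of this module (the decorators above need it).
import numba
@cuda.jit
def _warp_points_kernel(warped_points, source_points, nodes, node_translations,
                        node_rotations, anchors, weights):
    workload_index = cuda.grid(1)
    if workload_index < source_points.shape[0]:
        # per-thread scratch buffers for the blending helper
        temp1 = cuda.local.array(3, numba.float32)
        temp2 = cuda.local.array(3, numba.float32)
        linearly_blend_matrices(warped_points[workload_index], temp1, temp2,
                                source_points[workload_index], nodes, node_translations,
                                node_rotations, anchors, weights, workload_index)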
|
from django.conf.urls import url
urlpatterns = [
url(r'^$', 'hello.views.index'),
url(r'^hello/?$', 'hello.views.hello'),
url(r'^hello/([^/]+)$', 'hello.views.hello_user'),
]
|
#!/usr/bin/env python3
from pwn import *
binary = ELF('sort_it')
binary.write(0x1208,5*b'\x90')
binary.save('sort_it_patched')
os.chmod('sort_it_patched',0o755)
|
import os
import sys
import django
# Find the path to the current file and add the path to cbe to the system path
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(BASE_DIR, 'cbe')) #cbe is a sub directory of this file
# Initialize django
os.environ['DJANGO_SETTINGS_MODULE'] = 'cbe.settings'
django.setup()
# Create a superuser
from django.contrib.auth.models import User
if User.objects.filter(username='john').count() == 0:
superuser = User.objects.create_superuser('john', 'john@snow.com', 'johnpassword')
|
# Generated by Django 2.0.6 on 2018-09-24 05:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ContestSpace', '0002_results'),
]
operations = [
migrations.AddField(
model_name='pendingrequests',
name='teamname',
field=models.CharField(default='ab', max_length=100),
),
]
|
from .whcontrol import main
NAME = "WHControl"
|
from datetime import datetime
#############################################################################################################
# Prints a message with a prefix containing the date, time & log level
def Template(Message, Level):
Message = "{0} - [{1}] - {2}".format(datetime.now(), Level, Message)
print(Message)
#############################################################################################################
# Calls the Template Method, passes Message & sets Type as "Error"
def Error(Message):
Template(Message, "Error")
#############################################################################################################
# Calls the Template Method, passes Message & sets Type as "Warning"
def Warning(Message):
Template(Message, "Warning")
#############################################################################################################
# Calls the Template Method, passes Message & sets Type as "Information"
def Information(Message):
Template(Message, "Info")
#############################################################################################################
# Calls the Template Method, passes Message & sets Type as "Verbose"
def Verbose(Message):
Template(Message, "Verbose")
#############################################################################################################
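# Hypothetical usage sketch: emitting prefixed log lines with the helpers above.
if __name__ == "__main__":
    Information("Backup finished")   # e.g. "2024-01-01 12:00:00.000000 - [Info] - Backup finished"
    Error("Disk quota exceeded")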
|
import ImportedFile as IF
IF.func()
# <ref>
|
try:
with open("input.txt", "r") as fileContent:
segments = [[segment.split(" -> ")]
for segment in fileContent.readlines()]
segments = [tuple([int(axis) for axis in coordinate.strip().split(",")])
for segment in segments for coordinates in segment for coordinate in coordinates]
segments = [segments[index:index + 2]
for index in range(0, len(segments), 2)]
except FileNotFoundError:
print("[!] The input file was not found. The program will not continue.")
exit(-1)
highestX = 0
highestY = 0
for segment in segments:
for coordinates in segment:
x, y = coordinates[0], coordinates[1]
highestX = x if x > highestX else highestX
highestY = y if y > highestY else highestY
grid = [[0 for _ in range(highestX + 1)] for _ in range(highestY + 1)]
for segment in segments:
start, end = segment[0], segment[1]
x1, y1, x2, y2 = start[0], start[1], end[0], end[1]
if x1 == x2:
for row in range(y1 if y1 < y2 else y2, (y2 + 1) if y1 < y2 else (y1 + 1)):
grid[row][x1] += 1
elif y1 == y2:
for col in range(x1 if x1 < x2 else x2, (x2 + 1) if x1 < x2 else (x1 + 1)):
grid[y1][col] += 1
dangerousAreas = sum([1 for row in grid for cell in row if cell >= 2])
print(dangerousAreas)
|
import functools
import spanda.core as _core
from .typing import *
import spanda.sql.functions as F
def wrap_dataframe(func: Callable) -> Callable:
@functools.wraps(func)
def f(*args, **kwargs):
df = func(*args, **kwargs)
return _core.DataFrameWrapper(df)
return f
def wrap_col_args(func: Callable) -> Callable:
@functools.wraps(func)
def f(*args, **kwargs):
        # Note: there are other column types (such as _SpecialSpandaColumn), so non-string arguments
        # must be kept as-is rather than wrapped with F.col(..). This is why we whitelist str instead
        # of blacklisting non-Column types.
new_args = [F.col(a) if isinstance(a, str) else a for a in args]
df = func(*new_args, **kwargs)
return df
return f
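# A minimal usage sketch (the function below is an assumption, not part of spanda's API):
# wrap_col_args lets callers pass either Column objects or plain column-name strings.
@wrap_col_args
def _select_example(*cols):
    # receives Column objects; e.g. the string "age" arrives as F.col("age")
    return cols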
|
import psycopg2
import configparser
import sys
import os
import pandas as pd
import datetime
sql_template = """
"""
class Connector:
def __init__(self, config_path, system_name, template=sql_template):
        '''
        Parameters
        ----------
        config_path: str
            Path to a configuration file that contains the database authentication, e.g.
            [Redshift]
            user=######## Login user id
            password=###### Redshift password
            host=####### Hostname
            port=#### Port number
            dbname=##### Database name
        system_name: str
            Name of the section in the configuration file for the database system (e.g. 'Redshift')
        template: str
            SQL query template used by create_data_ts()
        Methods
        -------
        connector()
            Connects to the database described in the configuration file. First, create a credential
            file (for example config.ini) that contains hostname, username, password, database name and port.
            >> conn = Connector('config.ini', 'Redshift').connector()
            Connection is successful
        '''
self.config_path = config_path
self.system_name = system_name
self.template = template
def connector(self):
#parser = configparser.ConfigParser()
try:
if os.path.exists(self.config_path):
parser = configparser.ConfigParser()
else:
print("Config not found! Exiting!")
sys.exit(1)
parser.read(self.config_path)
            self.host = parser.get(self.system_name, 'host')
            self.user = parser.get(self.system_name, 'user')
            self.password = parser.get(self.system_name, 'password')
            self.dbname = parser.get(self.system_name, 'dbname')
            self.port = parser.get(self.system_name, 'port')
#if dbm.startswith('Redshift'):
conn = psycopg2.connect(dbname=self.dbname, host=self.host, port=int(self.port), user=self.user, password=self.password)
print(conn)
return conn
except OSError as e:
print('we encountered error {}'.format(e))
def create_data_ts(self):
        conn = self.connector()
df = pd.read_sql(self.template, conn)
col_name = list(df.columns)
if len(col_name) < 3:
df[col_name[0]] = pd.to_datetime(df[col_name[0]], format="%Y-%m-%d %H:%M:%S")
df=df.set_index(col_name[0])
return df
else:
df[col_name[1]] = pd.to_datetime(df[col_name[1]], format="%Y-%m-%d %H:%M:%S")
df=df.set_index(col_name[1])
return df
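# Hypothetical usage sketch; 'config.ini', the '[Redshift]' section name and the query are assumptions.
if __name__ == '__main__':
    connector = Connector('config.ini', 'Redshift', template='SELECT 1')
    conn = connector.connector()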
|
#!/usr/bin/env python
from __future__ import print_function
import matplotlib.pylab as plt
import os
from .builder import create_trajectory
def new_line(f):
def wrapper(*args, **kwargs):
print()
f(*args, **kwargs)
print()
return wrapper
def plot_trajectory(name):
STEPS = 600
DELTA = 1 if name != 'linear' else 0.1
trajectory = create_trajectory(name, STEPS)
x = [trajectory.get_position_at(i * DELTA).x for i in range(STEPS)]
y = [trajectory.get_position_at(i * DELTA).y for i in range(STEPS)]
trajectory_fig, trajectory_plot = plt.subplots(1, 1)
trajectory_plot.plot(x, y, label='trajectory', lw=3)
trajectory_plot.set_title(name.title() + ' Trajectory', fontsize=20)
trajectory_plot.set_xlabel(r'$x{\rm[m]}$', fontsize=18)
trajectory_plot.set_ylabel(r'$y{\rm[m]}$', fontsize=18)
trajectory_plot.legend(loc=0)
trajectory_plot.grid()
plt.show()
class TerminateProgramException(Exception):
pass
class Menu:
MENU = """Menu:
1. List Trajectories
2. Plot Trajectory
3. Quit
"""
def __init__(self):
self.trajectories = self._get_trajectories()
self.choices = {
'1': self.list_trajectories,
'2': self.plot_trajectory,
'3': self.quit
}
@staticmethod
def _get_trajectories():
path = os.sep.join(__file__.split(os.sep)[:-1])
trajectories = [file.replace('_trajectory.py', '')
for file in os.listdir(path)
if '_trajectory' in file and '.pyc' not in file]
return {str(i + 1): trajectory
for i, trajectory in enumerate(trajectories)}
def run(self):
while True:
print(self.MENU)
choice = raw_input('Enter an option: ')
try:
self.choices[choice]()
except KeyError:
print('Error: "{}" is not a valid option.'.format(choice))
except TerminateProgramException:
break
@new_line
def list_trajectories(self):
print('Trajectories:')
for k, v in sorted(self.trajectories.items()):
print('{}. {}'.format(k, v))
@new_line
def plot_trajectory(self):
while True:
self.list_trajectories()
choice = raw_input('What trajectory do you want plot? ')
try:
trajectory = self.trajectories[choice]
except KeyError:
print('Invalid option.')
else:
plot_trajectory(trajectory)
break
def quit(self):
raise TerminateProgramException()
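# Hypothetical entry point sketch: launch the interactive menu when run directly.
if __name__ == '__main__':
    Menu().run()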
|
#!/usr/bin/env python
import argparse
from numpy import ndarray
from LhcVaspTools.BasicUtils import saveData2Json, Vaspdata
def parseArgv() -> argparse.Namespace:
parser: argparse.ArgumentParser = argparse.ArgumentParser(description=
'This script is used to determine the indices of cross \
bands out of \"vaspout.h5\".')
parser.add_argument('input_file_name', nargs='?', type=str,
help='input \"vaspout.h5\" file.')
parser.add_argument('-o', '--output-file', nargs='?', type=str,
dest='output_file_name', help="output file.")
parser.add_argument('-e', '--energy-level', nargs='?', type=float,
dest='energy_level', required=True, help='the energy level where bands cross.')
options: argparse.Namespace = parser.parse_args()
return options
def main() -> int:
options: argparse.Namespace = parseArgv()
input_file_name: str = options.input_file_name
output_file_name: str = options.output_file_name
energy_level: float = options.energy_level
vasp_data: Vaspdata = Vaspdata()
vasp_data.readFile(input_file_name)
cross_bands: list = Vaspdata.deterCrossBands(vasp_data, energy_level).tolist()
if output_file_name is None:
print('cross_bands:\t' + str(cross_bands))
else:
saveData2Json(cross_bands, output_file_name)
return 0
if __name__ == "__main__":
main()
|
class SenderAction(object):
SENDER_ACTIONS = [
'mark_seen',
'typing_on',
'typing_off'
]
def __init__(self, sender_action):
if sender_action not in self.SENDER_ACTIONS:
raise ValueError('Invalid sender_action provided.')
self.sender_action = sender_action
def to_dict(self):
return self.sender_action
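# Hypothetical usage sketch: building the 'sender_action' fragment of a send-API payload.
if __name__ == '__main__':
    action = SenderAction('typing_on')
    print({'sender_action': action.to_dict()})  # {'sender_action': 'typing_on'}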
|
# -*- coding: utf-8 -*-
import tmonkeysettings as tm
import monkeyutility as mu
import monkeyegg
class UpdateEgg( monkeyegg.MonkeyEgg ):
def __init__(self):
monkeyegg.MonkeyEgg.__init__(self);
def get_commands(self):
return [
( ("update"), self.update )
]
def load(self):
return True
def unload(self):
return True
def update(self, cmdline):
"""Update tmonkey from repository
Usage:update"""
mu.update_dir( tm.TMONKEY_DIR , tm.TMONKEY_SVN_USERNAME , tm.TMONKEY_SVN_PASSWORD )
#reload(tmonkeycore) error while running
|
import os
from .character import Character
class Scanner(object):
"""docstring for Scanner."""
ENDMARK = '\0'
def __init__(self, source_text):
super(Scanner, self).__init__()
self.source_text = source_text
self.last_index = len(source_text) - 1
self.source_index = -1
self.line_index = 1
self.col_index = 0
def __iter__(self):
while True:
char = self.get_next()
            char_ahead = self.peek_next()
            yield (char, char_ahead)
            if char.value == self.ENDMARK or char_ahead.value is None:
                return  # end the generator (raising StopIteration here would become a RuntimeError under PEP 479)
def get_next(self):
self.source_index += 1
if self.source_index > 0 and self.source_index-1 < len(self.source_text):
if self.source_text[self.source_index-1] == os.linesep: # the previous character was a newline
self.line_index += 1
self.col_index = 0
self.col_index += 1
if self.source_index > self.last_index: # end of source_text
value = self.ENDMARK
else:
value = self.source_text[self.source_index]
return Character(
value,
self.line_index,
self.col_index,
self.source_index
)
def peek_next(self):
source_index = self.source_index
line_index = self.line_index
col_index = self.col_index
value = None
source_index += 1
if source_index > 0 and source_index-1 < len(self.source_text):
if self.source_text[source_index-1] == os.linesep: # the previous character was a newline
line_index += 1
col_index = 0
col_index += 1
if source_index > self.last_index: # end of source_text
value = self.ENDMARK
else:
value = self.source_text[source_index]
else:
line_index = -1
col_index = 0
source_index = -1
return Character(
value,
line_index,
col_index,
source_index
)
def lookahead(self, offset=1):
index = self.source_index + offset
if index > self.last_index:
return self.ENDMARK
return self.source_text[index]
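# Hypothetical usage sketch: walking a small source string and printing each character
# together with its one-character lookahead.
if __name__ == '__main__':
    for char, char_ahead in Scanner("ab\nc"):
        print(repr(char.value), '->', repr(char_ahead.value))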
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000242"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019E Herts.tsv"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019E Herts.tsv"
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn == "10033104592":
rec["postcode"] = "CM20 2QP"
if uprn == "10023088041":
rec["postcode"] = "CM20 2FP"
if uprn == "10023090475":
return None
if uprn in [
"100080727137", # CM233QY -> CM235QY : 34 Shortcroft, Bishops Stortford, Herts
"100081117602", # CM232JH -> CM232JJ : Wintersett, 5 Whitehall Lane, Bishops Stortford, Herts
"10033096663", # SG90HH -> SG90EA : Bentleys, Hare Street, Herts
"10033104823", # SG141LR -> SG142PX : Rose Cottage, Goldings Lane, Hertford, Herts
"10034515257", # CM210HH -> CM210DB : 1 Highfield, Crofters, Sawbridgeworth, Herts
"10034515290", # CM210HX -> CM210DB : 36 Highfield, Crofters, Sawbridgeworth, Herts
"10034515291", # CM210BD -> CM210DB : 37 Highfield, Crofters, Sawbridgeworth, Herts
]:
rec["accept_suggestion"] = True
if uprn in [
"100080722649", # CM232QT -> CM232QY : 70 Hadham Road, Bishops Stortford, Herts
"100081118102", # SG90DX -> SG90DY : The Small Barn, Hare Street, Herts
"100080736259", # SG142LB -> SG142LG : The Hill, 236/238 Hertingfordbury Road, Hertford, Herts
"10034620003", # SG142NE -> SG142NA : The Lodge, Marden Hill, Hertford, Herts, Herts
"100081121082", # SG111PW -> SG111PJ : Tregarron, Standon Hill, Ware, Herts
"10033105203", # SG111EZ -> SG111HA : The Gables, Old Hall Green, Standon, Herts
"10033095127", # SG143NE -> SG143NQ : The Beehive Cottage, Woodhall Park, Watton At Stone, Herts
"10033095128", # SG143NE -> SG143NQ : The Garden House, Woodhall Park, Watton At Stone, Herts
"100081120184", # SG111NW -> SG111NY : Bromley Hall, Bromley Lane, Standon, Herts
]:
rec["accept_suggestion"] = False
return rec
|
import sys
from datetime import datetime, timedelta
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.fields.field_user_value import FieldUserValue
from office365.sharepoint.listitems.listitem import ListItem
from tests import test_team_site_url, test_client_credentials, test_user_principal_name
ctx = ClientContext(test_team_site_url).with_credentials(test_client_credentials)
list_tasks = ctx.web.lists.get_by_title("Tasks")
items = list_tasks.items.get().top(1).execute_query()
if len(items) == 0:
sys.exit("No items for update found")
item_to_update = items[0] # type: ListItem
author = ctx.web.site_users.get_by_email(test_user_principal_name)
modified_date = datetime.utcnow() - timedelta(days=3)
result = item_to_update.validate_update_list_item({
"Title": "Task (updated)",
"Author": FieldUserValue.from_user(author),
"Modified": modified_date
}).execute_query()
has_any_error = any([item.HasException for item in result.value])
if has_any_error:
print("Item update completed with errors, for details refer 'ErrorMessage' property")
else:
print("Item has been updated successfully")
|
import os, nltk, json, argparse
from gensim.models import Word2Vec
def gen_formatted_data_imdbv1(data_dir):
data = []
for filename in os.listdir(data_dir):
file = os.path.join(data_dir, filename)
with open(file, 'r') as f:
content = f.readline().lower()
content_formatted = nltk.word_tokenize(content)
data.append(content_formatted)
return data
def gen_embedding_model_imdbv1(working_dir='../data/aclImdb', embedding_size=50):
fname = os.path.join(working_dir, "imdb_embedding")
if os.path.isfile(fname):
embedding_model = Word2Vec.load(fname)
return embedding_model
train_dir = os.path.join(working_dir, "train")
train_pos_dir = os.path.join(train_dir, "pos")
train_neg_dir = os.path.join(train_dir, "neg")
test_dir = os.path.join(working_dir, "test")
test_pos_dir = os.path.join(test_dir, "pos")
test_neg_dir = os.path.join(test_dir, "neg")
train = gen_formatted_data_imdbv1(train_pos_dir) + gen_formatted_data_imdbv1(train_neg_dir)
test = gen_formatted_data_imdbv1(test_pos_dir) + gen_formatted_data_imdbv1(test_neg_dir)
alldata = train + test
embedding_model = Word2Vec(train, size=embedding_size, window=5, min_count=5)
embedding_model.save(fname)
return embedding_model
def gen_formatted_data_scdata(data_file):
data = []
with open(data_file, 'r') as f:
lines = f.readlines()
for line in lines:
new_content = json.loads(line)
text = nltk.word_tokenize(new_content['text'])
data.append(text)
return data
def gen_embedding_model_scdata(working_dir='../data/sentence_consistency_data', embedding_size=50):
fname = os.path.join(working_dir, "scdata_embedding")
if os.path.isfile(fname):
embedding_model = Word2Vec.load(fname)
return embedding_model
train_file = os.path.join(working_dir, "train_data")
valid_file = os.path.join(working_dir, "valid_data")
train = gen_formatted_data_scdata(train_file)
valid = gen_formatted_data_scdata(valid_file)
alldata = train + valid
embedding_model = Word2Vec(train, size=embedding_size, window=5, min_count=5)
embedding_model.save(fname)
return embedding_model
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some important parameters.')
parser.add_argument('-depth', '--embed_size', type=int, default=50, help='embedding_size, default 50.')
parser.add_argument('-dn', '--dataname', type=str, default="imdbv1", help='generating embeddings for which data: imdbv1 or scdata.')
args = parser.parse_args()
embed_size = args.embed_size
dataname = args.dataname.lower()
assert dataname in ['imdbv1', 'scdata'], "Unknown dataset."
    if dataname == 'imdbv1':
embedding_model = gen_embedding_model_imdbv1(embedding_size=embed_size)
else:
embedding_model = gen_embedding_model_scdata(embedding_size=embed_size)
    word1 = "great"
    word2 = "horrible"
    print("similar words of {}:".format(word1))
    print(embedding_model.most_similar(word1))
    print("similar words of {}:".format(word2))
    print(embedding_model.most_similar(word2))
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .building_type import BuildingType
class MaintainableBuilding(BuildingType):
""" A Massilian building that requires maintenance each year. """
building_maintenance = models.DecimalField(_('Maintenance expenses'), max_digits=4, decimal_places=2)
settings = models.ForeignKey('MassiliaSettings', on_delete=models.CASCADE)
def __str__(self):
        return f'{self.number_built} {self.name.title()} - {self.building_maintenance} talents'
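# Hypothetical helper sketch (not part of the original model): total yearly upkeep across
# all maintainable buildings, assuming the default manager is available.
def total_maintenance_expenses():
    from django.db.models import Sum
    return MaintainableBuilding.objects.aggregate(total=Sum('building_maintenance'))['total'] or 0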
|
# -*- coding: utf-8 -*-
"""
=============================
Crack Egg
=============================
This an example of how to crack an egg (take a slice of subjects/lists from it)
"""
# Code source: Andrew Heusser
# License: MIT
#import
import numpy as np
from scipy.spatial.distance import cdist
import quail
#load data
egg = quail.load('example')
#crack egg
cracked_egg = quail.crack_egg(egg, subjects=[0], lists=[0])
cracked_egg.info()
pres = cracked_egg.get_pres_items().as_matrix()[0]
rec = cracked_egg.get_rec_items().as_matrix()[0]
def distmat(egg, feature, distdict):
f = [xi[feature] for xi in egg.get_pres_features()]
return cdist(f, f, distdict[feature])
# NOTE: `dist` must be computed first, e.g. dist = distmat(cracked_egg, feature, distdict)
# for a chosen feature and distance dictionary (both left unspecified here).
for idx in range(len(rec)-1):
    ind1 = np.where(pres==rec[idx])[0][0]
    ind2 = np.where(pres==rec[idx+1])[0][0]
    dists = dist[ind1, :]
    pair_dist = dist[ind1, ind2]
    rank = np.mean(np.where(np.sort(dists)[::-1] == pair_dist))
|
# -*- coding: utf-8 -*-
import KBEngine;
from KBEDebug import *;
import GameUtils;
MAX_FOOD_CNT = 100
class Room(KBEngine.Entity):
def __init__(self):
KBEngine.Entity.__init__(self);
INFO_MSG("Cell Room init ...");
KBEngine.globalData["CellRoom"] = self;
for i in range(MAX_FOOD_CNT):
self.RefreshFood();
def RefreshFood(self):
pos = GameUtils.randomPosition3D();
KBEngine.createEntity("Food", self.spaceID, pos, (0.0, 0.0, 0.0), {});
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_v1beta1/proto/write.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.cloud.firestore_v1beta1.proto import (
common_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2,
)
from google.cloud.firestore_v1beta1.proto import (
document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2,
)
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/firestore_v1beta1/proto/write.proto",
package="google.firestore.v1beta1",
syntax="proto3",
serialized_options=b"\n\034com.google.firestore.v1beta1B\nWriteProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\242\002\004GCFS\252\002\036Google.Cloud.Firestore.V1Beta1\312\002\036Google\\Cloud\\Firestore\\V1beta1\352\002!Google::Cloud::Firestore::V1beta1",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n0google/cloud/firestore_v1beta1/proto/write.proto\x12\x18google.firestore.v1beta1\x1a\x31google/cloud/firestore_v1beta1/proto/common.proto\x1a\x33google/cloud/firestore_v1beta1/proto/document.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\x9d\x02\n\x05Write\x12\x34\n\x06update\x18\x01 \x01(\x0b\x32".google.firestore.v1beta1.DocumentH\x00\x12\x10\n\x06\x64\x65lete\x18\x02 \x01(\tH\x00\x12@\n\ttransform\x18\x06 \x01(\x0b\x32+.google.firestore.v1beta1.DocumentTransformH\x00\x12;\n\x0bupdate_mask\x18\x03 \x01(\x0b\x32&.google.firestore.v1beta1.DocumentMask\x12@\n\x10\x63urrent_document\x18\x04 \x01(\x0b\x32&.google.firestore.v1beta1.PreconditionB\x0b\n\toperation"\x88\x05\n\x11\x44ocumentTransform\x12\x10\n\x08\x64ocument\x18\x01 \x01(\t\x12T\n\x10\x66ield_transforms\x18\x02 \x03(\x0b\x32:.google.firestore.v1beta1.DocumentTransform.FieldTransform\x1a\x8a\x04\n\x0e\x46ieldTransform\x12\x12\n\nfield_path\x18\x01 \x01(\t\x12\x65\n\x13set_to_server_value\x18\x02 \x01(\x0e\x32\x46.google.firestore.v1beta1.DocumentTransform.FieldTransform.ServerValueH\x00\x12\x34\n\tincrement\x18\x03 \x01(\x0b\x32\x1f.google.firestore.v1beta1.ValueH\x00\x12\x32\n\x07maximum\x18\x04 \x01(\x0b\x32\x1f.google.firestore.v1beta1.ValueH\x00\x12\x32\n\x07minimum\x18\x05 \x01(\x0b\x32\x1f.google.firestore.v1beta1.ValueH\x00\x12G\n\x17\x61ppend_missing_elements\x18\x06 \x01(\x0b\x32$.google.firestore.v1beta1.ArrayValueH\x00\x12\x45\n\x15remove_all_from_array\x18\x07 \x01(\x0b\x32$.google.firestore.v1beta1.ArrayValueH\x00"=\n\x0bServerValue\x12\x1c\n\x18SERVER_VALUE_UNSPECIFIED\x10\x00\x12\x10\n\x0cREQUEST_TIME\x10\x01\x42\x10\n\x0etransform_type"z\n\x0bWriteResult\x12/\n\x0bupdate_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x11transform_results\x18\x02 \x03(\x0b\x32\x1f.google.firestore.v1beta1.Value"v\n\x0e\x44ocumentChange\x12\x34\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.firestore.v1beta1.Document\x12\x12\n\ntarget_ids\x18\x05 \x03(\x05\x12\x1a\n\x12removed_target_ids\x18\x06 \x03(\x05"m\n\x0e\x44ocumentDelete\x12\x10\n\x08\x64ocument\x18\x01 \x01(\t\x12\x1a\n\x12removed_target_ids\x18\x06 \x03(\x05\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"m\n\x0e\x44ocumentRemove\x12\x10\n\x08\x64ocument\x18\x01 \x01(\t\x12\x1a\n\x12removed_target_ids\x18\x02 \x03(\x05\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"3\n\x0f\x45xistenceFilter\x12\x11\n\ttarget_id\x18\x01 \x01(\x05\x12\r\n\x05\x63ount\x18\x02 \x01(\x05\x42\xdc\x01\n\x1c\x63om.google.firestore.v1beta1B\nWriteProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xa2\x02\x04GCFS\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1\xca\x02\x1eGoogle\\Cloud\\Firestore\\V1beta1\xea\x02!Google::Cloud::Firestore::V1beta1b\x06proto3',
dependencies=[
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR,
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE = _descriptor.EnumDescriptor(
name="ServerValue",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.ServerValue",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="SERVER_VALUE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="REQUEST_TIME",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1103,
serialized_end=1164,
)
_sym_db.RegisterEnumDescriptor(_DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE)
_WRITE = _descriptor.Descriptor(
name="Write",
full_name="google.firestore.v1beta1.Write",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="update",
full_name="google.firestore.v1beta1.Write.update",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="delete",
full_name="google.firestore.v1beta1.Write.delete",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="transform",
full_name="google.firestore.v1beta1.Write.transform",
index=2,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.firestore.v1beta1.Write.update_mask",
index=3,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="current_document",
full_name="google.firestore.v1beta1.Write.current_document",
index=4,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="operation",
full_name="google.firestore.v1beta1.Write.operation",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=246,
serialized_end=531,
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM = _descriptor.Descriptor(
name="FieldTransform",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="field_path",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.field_path",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="set_to_server_value",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.set_to_server_value",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="increment",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.increment",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="maximum",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.maximum",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="minimum",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.minimum",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="append_missing_elements",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.append_missing_elements",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="remove_all_from_array",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.remove_all_from_array",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[_DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE,],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="transform_type",
full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.transform_type",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=660,
serialized_end=1182,
)
_DOCUMENTTRANSFORM = _descriptor.Descriptor(
name="DocumentTransform",
full_name="google.firestore.v1beta1.DocumentTransform",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="document",
full_name="google.firestore.v1beta1.DocumentTransform.document",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="field_transforms",
full_name="google.firestore.v1beta1.DocumentTransform.field_transforms",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_DOCUMENTTRANSFORM_FIELDTRANSFORM,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=534,
serialized_end=1182,
)
_WRITERESULT = _descriptor.Descriptor(
name="WriteResult",
full_name="google.firestore.v1beta1.WriteResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="update_time",
full_name="google.firestore.v1beta1.WriteResult.update_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="transform_results",
full_name="google.firestore.v1beta1.WriteResult.transform_results",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1184,
serialized_end=1306,
)
_DOCUMENTCHANGE = _descriptor.Descriptor(
name="DocumentChange",
full_name="google.firestore.v1beta1.DocumentChange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="document",
full_name="google.firestore.v1beta1.DocumentChange.document",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="target_ids",
full_name="google.firestore.v1beta1.DocumentChange.target_ids",
index=1,
number=5,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="removed_target_ids",
full_name="google.firestore.v1beta1.DocumentChange.removed_target_ids",
index=2,
number=6,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1308,
serialized_end=1426,
)
_DOCUMENTDELETE = _descriptor.Descriptor(
name="DocumentDelete",
full_name="google.firestore.v1beta1.DocumentDelete",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="document",
full_name="google.firestore.v1beta1.DocumentDelete.document",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="removed_target_ids",
full_name="google.firestore.v1beta1.DocumentDelete.removed_target_ids",
index=1,
number=6,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.firestore.v1beta1.DocumentDelete.read_time",
index=2,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1428,
serialized_end=1537,
)
_DOCUMENTREMOVE = _descriptor.Descriptor(
name="DocumentRemove",
full_name="google.firestore.v1beta1.DocumentRemove",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="document",
full_name="google.firestore.v1beta1.DocumentRemove.document",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="removed_target_ids",
full_name="google.firestore.v1beta1.DocumentRemove.removed_target_ids",
index=1,
number=2,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.firestore.v1beta1.DocumentRemove.read_time",
index=2,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1539,
serialized_end=1648,
)
_EXISTENCEFILTER = _descriptor.Descriptor(
name="ExistenceFilter",
full_name="google.firestore.v1beta1.ExistenceFilter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="target_id",
full_name="google.firestore.v1beta1.ExistenceFilter.target_id",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="count",
full_name="google.firestore.v1beta1.ExistenceFilter.count",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1650,
serialized_end=1701,
)
_WRITE.fields_by_name[
"update"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT
)
_WRITE.fields_by_name["transform"].message_type = _DOCUMENTTRANSFORM
_WRITE.fields_by_name[
"update_mask"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._DOCUMENTMASK
)
_WRITE.fields_by_name[
"current_document"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._PRECONDITION
)
_WRITE.oneofs_by_name["operation"].fields.append(_WRITE.fields_by_name["update"])
_WRITE.fields_by_name["update"].containing_oneof = _WRITE.oneofs_by_name["operation"]
_WRITE.oneofs_by_name["operation"].fields.append(_WRITE.fields_by_name["delete"])
_WRITE.fields_by_name["delete"].containing_oneof = _WRITE.oneofs_by_name["operation"]
_WRITE.oneofs_by_name["operation"].fields.append(_WRITE.fields_by_name["transform"])
_WRITE.fields_by_name["transform"].containing_oneof = _WRITE.oneofs_by_name["operation"]
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"set_to_server_value"
].enum_type = _DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"increment"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"maximum"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"minimum"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"append_missing_elements"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._ARRAYVALUE
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"remove_all_from_array"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._ARRAYVALUE
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.containing_type = _DOCUMENTTRANSFORM
_DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE.containing_type = (
_DOCUMENTTRANSFORM_FIELDTRANSFORM
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append(
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["set_to_server_value"]
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"set_to_server_value"
].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"]
_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append(
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["increment"]
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"increment"
].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"]
_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append(
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["maximum"]
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"maximum"
].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"]
_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append(
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["minimum"]
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"minimum"
].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"]
_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append(
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["append_missing_elements"]
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"append_missing_elements"
].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"]
_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append(
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["remove_all_from_array"]
)
_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[
"remove_all_from_array"
].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"]
_DOCUMENTTRANSFORM.fields_by_name[
"field_transforms"
].message_type = _DOCUMENTTRANSFORM_FIELDTRANSFORM
_WRITERESULT.fields_by_name[
"update_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_WRITERESULT.fields_by_name[
"transform_results"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE
)
_DOCUMENTCHANGE.fields_by_name[
"document"
].message_type = (
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT
)
_DOCUMENTDELETE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DOCUMENTREMOVE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["Write"] = _WRITE
DESCRIPTOR.message_types_by_name["DocumentTransform"] = _DOCUMENTTRANSFORM
DESCRIPTOR.message_types_by_name["WriteResult"] = _WRITERESULT
DESCRIPTOR.message_types_by_name["DocumentChange"] = _DOCUMENTCHANGE
DESCRIPTOR.message_types_by_name["DocumentDelete"] = _DOCUMENTDELETE
DESCRIPTOR.message_types_by_name["DocumentRemove"] = _DOCUMENTREMOVE
DESCRIPTOR.message_types_by_name["ExistenceFilter"] = _EXISTENCEFILTER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Write = _reflection.GeneratedProtocolMessageType(
"Write",
(_message.Message,),
{
"DESCRIPTOR": _WRITE,
"__module__": "google.cloud.firestore_v1beta1.proto.write_pb2",
"__doc__": """A write on a document.
Attributes:
operation:
The operation to execute.
update:
A document to write.
delete:
A document name to delete. In the format: ``projects/{project_
id}/databases/{database_id}/documents/{document_path}``.
transform:
Applies a transformation to a document. At most one
``transform`` per document is allowed in a given request. An
``update`` cannot follow a ``transform`` on the same document
in a given request.
update_mask:
The fields to update in this write. This field can be set
only when the operation is ``update``. If the mask is not set
for an ``update`` and the document exists, any existing data
will be overwritten. If the mask is set and the document on
the server has fields not covered by the mask, they are left
unchanged. Fields referenced in the mask, but not present in
the input document, are deleted from the document on the
server. The field paths in this mask must not contain a
reserved field name.
current_document:
An optional precondition on the document. The write will fail
if this is set and not met by the target document.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Write)
},
)
_sym_db.RegisterMessage(Write)
DocumentTransform = _reflection.GeneratedProtocolMessageType(
"DocumentTransform",
(_message.Message,),
{
"FieldTransform": _reflection.GeneratedProtocolMessageType(
"FieldTransform",
(_message.Message,),
{
"DESCRIPTOR": _DOCUMENTTRANSFORM_FIELDTRANSFORM,
"__module__": "google.cloud.firestore_v1beta1.proto.write_pb2",
"__doc__": """A transformation of a field of the document.
Attributes:
field_path:
The path of the field. See
[Document.fields][google.firestore.v1beta1.Document.fields]
for the field path syntax reference.
transform_type:
The transformation to apply on the field.
set_to_server_value:
Sets the field to the given server value.
increment:
Adds the given value to the field’s current value. This must
be an integer or a double value. If the field is not an
integer or double, or if the field does not yet exist, the
transformation will set the field to the given value. If
either of the given value or the current field value are
doubles, both values will be interpreted as doubles. Double
arithmetic and representation of double values follow IEEE 754
semantics. If there is positive/negative integer overflow, the
field is resolved to the largest magnitude positive/negative
integer.
maximum:
Sets the field to the maximum of its current value and the
given value. This must be an integer or a double value. If
the field is not an integer or double, or if the field does
not yet exist, the transformation will set the field to the
given value. If a maximum operation is applied where the field
and the input value are of mixed types (that is - one is an
integer and one is a double) the field takes on the type of
the larger operand. If the operands are equivalent (e.g. 3 and
3.0), the field does not change. 0, 0.0, and -0.0 are all
zero. The maximum of a zero stored value and zero input value
is always the stored value. The maximum of any numeric value x
and NaN is NaN.
minimum:
Sets the field to the minimum of its current value and the
given value. This must be an integer or a double value. If
the field is not an integer or double, or if the field does
not yet exist, the transformation will set the field to the
input value. If a minimum operation is applied where the field
and the input value are of mixed types (that is - one is an
integer and one is a double) the field takes on the type of
the smaller operand. If the operands are equivalent (e.g. 3
and 3.0), the field does not change. 0, 0.0, and -0.0 are all
zero. The minimum of a zero stored value and zero input value
is always the stored value. The minimum of any numeric value x
and NaN is NaN.
append_missing_elements:
Append the given elements in order if they are not already
present in the current field value. If the field is not an
array, or if the field does not yet exist, it is first set to
the empty array. Equivalent numbers of different types
(e.g. 3L and 3.0) are considered equal when checking if a
value is missing. NaN is equal to NaN, and Null is equal to
Null. If the input contains multiple equivalent values, only
the first will be considered. The corresponding
transform_result will be the null value.
remove_all_from_array:
Remove all of the given elements from the array in the field.
If the field is not an array, or if the field does not yet
exist, it is set to the empty array. Equivalent numbers of
the different types (e.g. 3L and 3.0) are considered equal
when deciding whether an element should be removed. NaN is
equal to NaN, and Null is equal to Null. This will remove all
equivalent values if there are duplicates. The corresponding
transform_result will be the null value.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentTransform.FieldTransform)
},
),
"DESCRIPTOR": _DOCUMENTTRANSFORM,
"__module__": "google.cloud.firestore_v1beta1.proto.write_pb2",
"__doc__": """A transformation of a document.
Attributes:
document:
The name of the document to transform.
field_transforms:
The list of transformations to apply to the fields of the
document, in order. This must not be empty.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentTransform)
},
)
_sym_db.RegisterMessage(DocumentTransform)
_sym_db.RegisterMessage(DocumentTransform.FieldTransform)
WriteResult = _reflection.GeneratedProtocolMessageType(
"WriteResult",
(_message.Message,),
{
"DESCRIPTOR": _WRITERESULT,
"__module__": "google.cloud.firestore_v1beta1.proto.write_pb2",
"__doc__": """The result of applying a write.
Attributes:
update_time:
The last update time of the document after applying the write.
Not set after a ``delete``. If the write did not actually
change the document, this will be the previous update_time.
transform_results:
The results of applying each [DocumentTransform.FieldTransform
][google.firestore.v1beta1.DocumentTransform.FieldTransform],
in the same order.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.WriteResult)
},
)
_sym_db.RegisterMessage(WriteResult)
DocumentChange = _reflection.GeneratedProtocolMessageType(
"DocumentChange",
(_message.Message,),
{
"DESCRIPTOR": _DOCUMENTCHANGE,
"__module__": "google.cloud.firestore_v1beta1.proto.write_pb2",
"__doc__": """A [Document][google.firestore.v1beta1.Document] has changed. May be
the result of multiple [writes][google.firestore.v1beta1.Write],
including deletes, that ultimately resulted in a new value for the
[Document][google.firestore.v1beta1.Document]. Multiple
[DocumentChange][google.firestore.v1beta1.DocumentChange] messages may
be returned for the same logical change, if multiple targets are
affected.
Attributes:
document:
The new state of the
[Document][google.firestore.v1beta1.Document]. If ``mask`` is
set, contains only fields that were updated or added.
target_ids:
A set of target IDs of targets that match this document.
removed_target_ids:
A set of target IDs for targets that no longer match this
document.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentChange)
},
)
_sym_db.RegisterMessage(DocumentChange)
DocumentDelete = _reflection.GeneratedProtocolMessageType(
"DocumentDelete",
(_message.Message,),
{
"DESCRIPTOR": _DOCUMENTDELETE,
"__module__": "google.cloud.firestore_v1beta1.proto.write_pb2",
"__doc__": """A [Document][google.firestore.v1beta1.Document] has been deleted. May
be the result of multiple [writes][google.firestore.v1beta1.Write],
including updates, the last of which deleted the
[Document][google.firestore.v1beta1.Document]. Multiple
[DocumentDelete][google.firestore.v1beta1.DocumentDelete] messages may
be returned for the same logical delete, if multiple targets are
affected.
Attributes:
document:
The resource name of the
[Document][google.firestore.v1beta1.Document] that was
deleted.
removed_target_ids:
A set of target IDs for targets that previously matched this
entity.
read_time:
The read timestamp at which the delete was observed. Greater
or equal to the ``commit_time`` of the delete.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentDelete)
},
)
_sym_db.RegisterMessage(DocumentDelete)
DocumentRemove = _reflection.GeneratedProtocolMessageType(
"DocumentRemove",
(_message.Message,),
{
"DESCRIPTOR": _DOCUMENTREMOVE,
"__module__": "google.cloud.firestore_v1beta1.proto.write_pb2",
"__doc__": """A [Document][google.firestore.v1beta1.Document] has been removed from
the view of the targets. Sent if the document is no longer relevant
to a target and is out of view. Can be sent instead of a
DocumentDelete or a DocumentChange if the server can not send the new
value of the document. Multiple
[DocumentRemove][google.firestore.v1beta1.DocumentRemove] messages may
be returned for the same logical write or delete, if multiple targets
are affected.
Attributes:
document:
The resource name of the
[Document][google.firestore.v1beta1.Document] that has gone
out of view.
removed_target_ids:
A set of target IDs for targets that previously matched this
document.
read_time:
The read timestamp at which the remove was observed. Greater
or equal to the ``commit_time`` of the change/delete/remove.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentRemove)
},
)
_sym_db.RegisterMessage(DocumentRemove)
ExistenceFilter = _reflection.GeneratedProtocolMessageType(
"ExistenceFilter",
(_message.Message,),
{
"DESCRIPTOR": _EXISTENCEFILTER,
"__module__": "google.cloud.firestore_v1beta1.proto.write_pb2",
"__doc__": """A digest of all the documents that match a given target.
Attributes:
target_id:
The target ID to which this filter applies.
count:
The total count of documents that match [target_id][google.fir
estore.v1beta1.ExistenceFilter.target_id]. If different from
the count of documents in the client that match, the client
must manually determine which documents no longer match the
target.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ExistenceFilter)
},
)
_sym_db.RegisterMessage(ExistenceFilter)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2017 Cordell Bloor
# Published under the MIT License
"""Find C or C++ header files with incorrect or missing include guards."""
from __future__ import print_function
import argparse
import sys
import os
from functools import partial
from .pattern_compiler import compile_pattern, ParserError
from .util import (guess_guard, index_guard_start, index_guard_end,
index_pragma_once, get_file_contents, apply_to_headers)
__version__ = "2.4.0"
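# Note: identifiers that start with an underscore or contain a double underscore
# are (effectively) reserved for the implementation in C and C++, so they should
# not be used as include guards; that is what the check below screens for.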
def is_reserved_token(token):
return token[0] == '_' or token.find('__') != -1
def is_protected_by_guard(contents, guard_symbol):
try:
if guard_symbol:
index_guard_start(contents, guard_symbol)
else:
guess_guard(contents)
index_guard_end(contents)
return True
except ValueError:
return False
def is_protected_by_once(contents):
try:
index_pragma_once(contents)
return True
except ValueError:
return False
def is_protected(contents, options):
return (options.accept_guard and is_protected_by_guard(contents, options.guard)
or options.accept_once and is_protected_by_once(contents))
def is_file_protected(filename, options):
contents, metadata = get_file_contents(filename)
return is_protected(contents, options)
def process_file(filepath, filename, options):
class Context:
pass
ctx = Context()
ctx.filepath = filepath
ctx.filename = filename
options.guard = options.create_guard(ctx)
ok = True
if options.print_guard:
print(options.guard)
return ok
try:
if not is_file_protected(filepath, options):
print(filepath)
except Exception as e:
ok = False
print('Error processing {0}:\n\t({1}) {2}'.format(filepath,
e.__class__.__name__, str(e)), file=sys.stderr)
return ok
def process_pattern(guard_pattern):
create_guard = lambda ctx: None
if guard_pattern is not None:
try:
create_guard = compile_pattern(guard_pattern)
except ParserError as e:
print(e, file=sys.stderr)
sys.exit(1)
return create_guard
def main():
parser = argparse.ArgumentParser(
description='Find C or C++ header files with incorrect or missing '
'include guards.')
parser.add_argument('files',
metavar='file',
nargs='+',
help='the file(s) to check; directories require the recursive '
'option')
parser.add_argument('-V','--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-r','--recursive',
action='store_true',
dest='recursive',
help='recursively search directories for headers')
parser.add_argument('-p','--pattern',
default=None,
metavar='pattern',
help='check that include guards match the specified pattern. For '
"example, -p 'name | upper' would create an expectation that "
'Match.h has the include guard MATCH_H. See the docs on GitHub '
'for a full description of the guard pattern language.')
parser.add_argument('-e','--exclude',
action='append',
dest='exclusions',
metavar='pattern',
default=[],
help='exclude files that match the given fnmatch pattern. '
'Any * is a wildcard matching everything; '
'a ? matches any single character; '
'[_] matches any characters within the brackets; '
'and [!_] matches any characters not within the brackets.')
parser.add_argument('-o','--only',
dest='type',
metavar='type',
default='any',
choices=['guard','once','g','o'],
help='only accept the specified type of include protection. '
"Use 'guard' or 'g' to only accept include guards, or "
"use 'once' or 'o' to only accept #pragma once.")
parser.add_argument('-n','--print-guard',
action='store_true',
dest='print_guard',
help='skip the check and instead print the include guards generated '
'by --pattern.')
args = parser.parse_args()
if args.print_guard and args.pattern is None:
print('Cannot print expected guard without guard pattern. Specify --pattern.', file=sys.stderr)
sys.exit(1)
class Options:
pass
options = Options()
options.accept_guard = args.type in ['g', 'guard', 'any']
options.accept_once = args.type in ['o', 'once', 'any']
options.create_guard = process_pattern(args.pattern)
options.print_guard = args.print_guard
ok = True
for f in args.files:
if os.path.isdir(f):
if args.recursive:
process = partial(process_file, options=options)
ok &= apply_to_headers(process, f, args.exclusions)
else:
print('"%s" is a directory' % f, file=sys.stderr)
sys.exit(1)
else:
ok &= process_file(f, os.path.basename(f), options)
if not ok:
sys.exit(1)
if __name__ == "__main__":
main()
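# Example invocation (illustrative sketch; the console-script name below is a
# placeholder for however this module is exposed on your system):
#
#   checkguard -r -p 'name | upper' include/
#
# This recursively checks every header under include/ and prints the path of any
# file that is not protected by a guard matching the FILENAME_H convention.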
|
import os
import re
import subprocess
from functools import partial
from typing import List, NewType, cast
from utils import get_console_encoding
console_encoding = get_console_encoding()
Drive = NewType("Drive", str)
# Get a list of logical drive letters
def get_logical_drives() -> List[Drive]:
result = subprocess.run("wmic logicaldisk where drivetype=3 get caption", capture_output=True)
# Example stdout: b'Caption \r\r\nC: \r\r\nD: \r\r\nE: \r\r\n\r\r\n'
return cast(List[Drive], [
l[:2] # only the first two characters of each line (ex: 'C:')
for l in (
result.stdout # output of the command, as a bytes object
.decode() # turned into a string
.split('\n') # split into a list of lines
[1:-2] # skip the first line with "Caption", and last two empty lines
)
]) # Example output: ['C:', 'D:', 'E:']
# Find the drive the system is installed on
def get_system_drive() -> Drive:
return cast(Drive, os.getenv("systemdrive", "C:")) # Example output: 'C:'
def schedule_check(drive: Drive, *, force: bool = False):
    # Setting the dirty bit has proven to cause trouble for some users: the check
    # then runs on every restart, regardless of whether it succeeds. This is why the
    # default now uses the less invasive 'chkntfs /C', while the original dirty-bit
    # option remains available for external usage via force=True.
if force:
subprocess.run(f"fsutil dirty set {drive}", capture_output=True)
else:
subprocess.run(f"chkntfs /C {drive}", capture_output=True)
# A wrapper around chkdsk that automatically answers "No" if the disk could not
# be locked. Returns the exit code reported by chkdsk.
def run_chkdsk(drive: Drive, options: str = '') -> int:
command = f"chkdsk {drive}"
if options:
command += f" {options}"
process = subprocess.Popen(
command,
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
assert process.stdin is not None
assert process.stdout is not None
process.stdin.write(b"\n")
key = None
line_array = []
read = partial(process.stdout.read, 1)
for byte in iter(read, b''):
line_array.append(byte)
if byte == b'\r':
# read one more to see if '\n' follows
following = read()
if following == b'\n':
line_array.append(following)
# we have a whole line
line = b''.join(line_array).decode(console_encoding)
if following != b'\n':
# if it wasn't a new line, then we just put it back for the next line
line_array = [following]
else:
line_array = []
print(line, end='')
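            # chkdsk could not lock the volume and is asking a Y/N-style question;
            # capture the second option (the one that declines), which keeps this
            # working regardless of the console language.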
match = re.search(r"\(./(.)\)", line)
if match:
key = match[1]
break
if key is not None:
print(key)
# deny check on the next restart
process.communicate(input=f"{key}\n".encode(console_encoding), timeout=1)
else:
# completed successfully
process.communicate(timeout=1)
process.wait() # wait until the process terminates
print() # insert additional newline
return process.returncode
def run_sfc():
subprocess.run("sfc /scannow")
def run_dism():
subprocess.run("DISM /Online /Cleanup-Image /RestoreHealth")
|
# --------------
#Code starts here
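# The columns of `census` appear to follow the classic adult-census layout
# (age, education-num, race, sex, capital-gain, capital-loss, hours-per-week, income);
# the column indices used below are based on that assumption.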
senior_citizens=census[census[:,0]>60]
working_hours_sum=np.sum(senior_citizens[:,6])
senior_citizens_len=senior_citizens[:,0].size
avg_working_hours=working_hours_sum/senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
if len_0==min(len_0,len_1,len_2,len_3,len_4): minority_race=0
elif len_1==min(len_0,len_1,len_2,len_3,len_4): minority_race=1
elif len_2==min(len_0,len_1,len_2,len_3,len_4): minority_race=2
elif len_3==min(len_0,len_1,len_2,len_3,len_4): minority_race=3
elif len_4==min(len_0,len_1,len_2,len_3,len_4): minority_race=4
# --------------
age=census[0:,0]
max_age=np.max(age)
min_age=np.min(age)
age_mean=np.mean(age)
age_std=np.std(age)
# --------------
#Code starts here
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
avg_pay_high=high[:,7].mean()
avg_pay_low=low[:,7].mean()
print(avg_pay_high,avg_pay_low)
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
data=np.array(np.genfromtxt(path,delimiter=",",skip_header=1))
census=np.array(np.concatenate((new_record,data)))
print(census)
|
#!/usr/bin/env python
# The listen action in a task plan
import rospy
import actionlib
from task_executor.abstract_step import AbstractStep
from actionlib_msgs.msg import GoalStatus
from hlpr_speech_msgs.srv import SpeechService
class ListenAction(AbstractStep):
    # Currently we use the service to get the speech; perhaps we should use
    # the topic instead?
SPEECH_CMD_SERVICE_NAME = "/get_last_speech_cmd"
def init(self, name):
self.name = name
# The speech service
self._speech_cmd_srv = rospy.ServiceProxy(
ListenAction.SPEECH_CMD_SERVICE_NAME,
SpeechService
)
# Set a stopped flag
self._stopped = False
# Wait for a connection to the speech service
rospy.loginfo("Connecting to the speech recognition...")
self._speech_cmd_srv.wait_for_service()
rospy.loginfo("...speech recognition connected")
def run(self, expected_cmd=None):
# Run until we hear a command. If expected_cmd is not None, and the
# received command is not in the list of expected_cmd, then abort
rospy.loginfo("Action {}: Listening.{}".format(
self.name,
"" if expected_cmd is None else " Expected: {}".format(expected_cmd)
))
self._stopped = False
received_cmd = None
while received_cmd is None and not self._stopped:
try:
received_cmd = self._speech_cmd_srv(True).speech_cmd
except rospy.ServiceException as e:
received_cmd = None
yield self.set_running()
# Make only one note of us calling the listen service
self.notify_service_called(ListenAction.SPEECH_CMD_SERVICE_NAME)
# Check whether we were stopped. If not, check the expected_cmd and
# return success or fail accordingly
if self._stopped:
yield self.set_preempted(
action=self.name,
goal=expected_cmd,
received_cmd=received_cmd
)
elif expected_cmd is not None and received_cmd not in expected_cmd:
yield self.set_aborted(
action=self.name,
goal=expected_cmd,
received_cmd=received_cmd
)
else: # expected_cmd is None or received_cmd in expected_cmd
yield self.set_succeeded(cmd=received_cmd)
def stop(self):
self._stopped = True
|
#!/usr/bin/env python
import sys, time
import serial
from pexpect import fdpexpect
# Speech synthesis is optional and requires espeak (for Linux)
try:
import pyttsx3
enable_speech = True
except ImportError:
enable_speech = False
def check_prompt(AP):
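    # Press Enter a couple of times and wait for the 'apboot>' prompt to confirm
    # that we are sitting at the bootloader shell.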
time.sleep(0.1)
AP.send('\n')
time.sleep(0.1)
AP.expect('apboot>')
time.sleep(0.1)
AP.send('\n')
time.sleep(0.1)
AP.expect('apboot>')
# Create serial connection
AP_serial = serial.Serial(sys.argv[-1], 9600)
AP = fdpexpect.fdspawn(AP_serial)
# Setup TTS
if enable_speech:
tts = pyttsx3.init()
tts.say('Started')
tts.runAndWait()
# Keep on listening until the expect times out (1 hour)
while True:
print('\n\n\n\n')
print('Waiting for power... ', end='', flush=True)
AP.expect('APBoot ', timeout=3600)
time.sleep(0.1)
print('done!')
# Enter the bootloader
print('Waiting for boot... ', end='', flush=True)
AP.expect('Hit <Enter>')
time.sleep(0.1)
print('done!')
time.sleep(0.1)
print('Entering the bootloader...', end='', flush=True)
check_prompt(AP)
print('done!')
time.sleep(0.1)
# Get the AP name
print('Asking for name... ', end='', flush=True)
AP.send('printenv name\n') # PY-AP215-074
time.sleep(0.1)
name = ''
while name == '':
input_string = AP.readline().decode("utf-8")
if input_string[:5] == 'name=':
name = input_string[5:].strip('\r\n')
print(name)
check_prompt(AP)
time.sleep(0.1)
# Purge all the variables in one go
print('Purging all variables... ', end='', flush=True)
AP.send('purgeenv\n')
check_prompt(AP)
print('done!')
time.sleep(0.1)
# Put back the name
print('Setting original name... ', end='', flush=True)
AP.send('setenv name ' + name + '\n')
check_prompt(AP)
print('done!')
time.sleep(0.1)
# Save the new variables
print('Saving variables... ', end='', flush=True)
AP.send('saveenv\n')
check_prompt(AP)
print('done!')
time.sleep(0.1)
# Say the number of the AP
if enable_speech:
tts.say(str(int(name.split('-')[-1])) + ' done')
tts.runAndWait()
|
def test_metronome(dcos_api_session):
job = {
'description': 'Test Metronome API regressions',
'id': 'test.metronome',
'run': {
'cmd': 'ls',
'docker': {'image': 'busybox:latest'},
'cpus': 1,
'mem': 512,
'disk': 0,
'user': 'nobody',
'restart': {'policy': 'ON_FAILURE'}
}
}
dcos_api_session.metronome_one_off(job)
|
from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
urlpatterns = [
url(r'^$', views.ContactView.as_view(), name="form"),
url(r'^/success$', TemplateView.as_view(template_name="contact/success.html"), name="success"),
]
|
from bika.lims import bikaMessageFactory as _, t
from bika.lims import logger
from bika.lims.browser import BrowserView
from bika.lims.config import POINTS_OF_CAPTURE
from bika.lims.idserver import renameAfterCreation
from bika.lims.interfaces import IResultOutOfRange
from bika.lims.utils import isnumber
from bika.lims.utils import to_utf8, encode_header, createPdf, attachPdf
from bika.lims.utils import formatDecimalMark, format_supsub
from bika.lims.utils.analysis import format_uncertainty
#from bika.lims.vocabularies import getARReportTemplates
from DateTime import DateTime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.Utils import formataddr
from operator import itemgetter
from os.path import join
from plone.resource.utils import iterDirectoriesOfType, queryResourceDirectory
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
from Products.CMFPlone.utils import safe_unicode, _createObjectByType
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from smtplib import SMTPServerDisconnected, SMTPRecipientsRefused
from zope.component import getAdapters, getUtility
from plone.registry.interfaces import IRegistry
import App
import glob, os, sys, traceback
import Globals
import re
import tempfile
import urllib2
class OrderPublishView(BrowserView):
template = ViewPageTemplateFile("templates/order_publish.pt")
_products = []
_current_product_index = 0
_publish = False
def __init__(self, context, request, publish=False):
super(OrderPublishView, self).__init__(context, request)
self._publish = publish
self._products = [self.context]
@property
def _DEFAULT_TEMPLATE(self):
registry = getUtility(IRegistry)
return registry.get(
'bika.lims.order.default_order_template', 'default.pt')
def __call__(self):
if self.context.portal_type == 'Order':
self._products = [self.context]
elif self.context.portal_type == 'OrderFolder' \
and self.request.get('items',''):
uids = self.request.get('items').split(',')
uc = getToolByName(self.context, 'uid_catalog')
self._products = [obj.getObject() for obj in uc(UID=uids)]
else:
#Do nothing
self.destination_url = self.request.get_header("referer",
self.context.absolute_url())
# Do publish?
if self.request.form.get('publish', '0') == '1':
self.publishFromPOST()
else:
return self.template()
def showOptions(self):
""" Returns true if the options top panel will be displayed
in the template
"""
        return self.request.get('pub', '1') == '1'
def getOrderTemplate(self):
templates_dir = 'templates/sheets'
embedt = self.request.form.get('template', self._DEFAULT_TEMPLATE)
if embedt.find(':') >= 0:
prefix, template = embedt.split(':')
templates_dir = queryResourceDirectory('sheets', prefix).directory
embedt = template
embed = ViewPageTemplateFile(os.path.join(templates_dir, embedt))
reptemplate = ""
try:
reptemplate = embed(self)
except:
tbex = traceback.format_exc()
arid = self._products[self._current_product_index].id
reptemplate = "<div class='error-report'>%s - %s '%s':<pre>%s</pre></div>" % (arid, _("Unable to load the template"), embedt, tbex)
self._nextProduct()
return reptemplate
def getOrderSheetStyle(self):
""" Returns the css style to be used for the current template.
If the selected template is 'default.pt', this method will
return the content from 'default.css'. If no css file found
for the current template, returns empty string
"""
template = self.request.form.get('template', self._DEFAULT_TEMPLATE)
content = ''
if template.find(':') >= 0:
prefix, template = template.split(':')
resource = queryResourceDirectory('sheets', prefix)
css = '{0}.css'.format(template[:-3])
if css in resource.listDirectory():
content = resource.readFile(css)
else:
this_dir = os.path.dirname(os.path.abspath(__file__))
templates_dir = os.path.join(this_dir, 'templates/sheets/')
path = '%s/%s.css' % (templates_dir, template[:-3])
with open(path, 'r') as content_file:
content = content_file.read()
return content
def getProducts(self):
""" Returns a dict with the order entries
"""
        return self._products
def getProductsCount(self):
""" Returns the number of product orders to manage
"""
        return len(self._products)
def getOrderObj(self):
""" Returns the order obj
"""
return self._products[self._current_product_index]
def getOrder(self):
""" Returns the dict for the current product
"""
return self._order_data(self._products[self._current_product_index])
def _nextProduct(self):
""" Move to the next product
"""
if self._current_product_index < len(self._products):
self._current_product_index += 1
def _order_data(self, order, excludearuids=[]):
""" Creates an order dict, accessible from the view and from each
specific template.
"""
data = {'obj': order,
'id': order.getId(),
'order_number': order.getOrderNumber(),
'title': order.Title(),
'description': order.Description(),
'supplier_id': order.getSupplierUID(),
'date_dispatched': self.ulocalized_time(order.getDateDispatched(), long_format=1),
'date_published': self.ulocalized_time(DateTime(), long_format=1),
'subtotal': order.getSubtotal(),
'vat_amount': order.getVATAmount(),
'url': order.absolute_url(),
'remarks': to_utf8(order.getRemarks()),
'footer': to_utf8(self.context.bika_setup.getResultFooter()),
}
data['supplier'] = self._supplier_data(order)
# Get the Product List for the Order
# print order.order_lineitems
items = order.order_lineitems
# products = order.aq_parent.objectValues('Product')
products = self.context.get_supplier_products()
item_list = []
grand_total = 0.00
for item in items:
withvat_price = 0.00
prodid = item['Product']
product = [pro for pro in products if pro.getId() == prodid][0]
price = float(item['Price'])
vat = float(item['VAT'])
qty = float(item['Quantity'])
withvat_price = price * qty * ((vat /100) + 1)
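            # e.g. price=10.00, qty=2, vat=15 -> 10.00 * 2 * 1.15 = 23.00 (line total incl. VAT)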
item_list.append({
'title': product.Title(),
'description': product.Description(),
'unit': product.getUnit(),
'price': price,
'vat': '%s%%' % vat,
'quantity': qty,
'subtotal': '%.2f' % (price * qty),
'withvat' : '%.2f' % (withvat_price)
})
grand_total += withvat_price
item_list = sorted(item_list, key = itemgetter('title'))
data['products'] = item_list
data["grandTotal"] = '%.2f' % grand_total
return data
def _supplier_data(self, order):
data = {}
supplier = order.aq_parent
if supplier:
data['obj'] = supplier
data['id'] = supplier.id
data['title'] = supplier.Title()
data['url'] = supplier.absolute_url()
data['name'] = to_utf8(supplier.getName())
data['phone'] = to_utf8(supplier.getPhone())
data['fax'] = to_utf8(supplier.getFax())
supplier_address = supplier.getPostalAddress()
if supplier_address:
_keys = ['address', 'city', 'state', 'zip', 'country']
_list = ["<div>%s</div>" % supplier_address.get(v) for v in _keys
if supplier_address.get(v)]
supplier_address = "".join(_list)
else:
supplier_address = ''
data['address'] = to_utf8(supplier_address)
data['email'] = to_utf8(supplier.getEmailAddress())
return data
def localise_images(self, htmlreport):
"""WeasyPrint will attempt to retrieve attachments directly from the URL
referenced in the HTML report, which may refer back to a single-threaded
(and currently occupied) zeoclient, hanging it. All "attachments"
using urls ending with at_download/AttachmentFile must be converted
to local files.
Returns a list of files which were created, and a modified copy
of htmlreport.
"""
cleanup = []
_htmltext = to_utf8(htmlreport)
# first regular image tags
for match in re.finditer("""http.*at_download\/AttachmentFile""", _htmltext, re.I):
url = match.group()
att_path = url.replace(self.portal_url+"/", "")
attachment = self.portal.unrestrictedTraverse(att_path)
af = attachment.getAttachmentFile()
filename = af.filename
extension = "."+filename.split(".")[-1]
outfile, outfilename = tempfile.mkstemp(suffix=extension)
outfile = open(outfilename, 'wb')
outfile.write(str(af.data))
outfile.close()
            _htmltext = _htmltext.replace(url, outfilename)
cleanup.append(outfilename)
return cleanup, _htmltext
def publishFromPOST(self):
html = self.request.form.get('html')
style = self.request.form.get('style')
uid = self.request.form.get('uid')
reporthtml = "<html><head>%s</head><body><div id='report'>%s</body></html>" % (style, html);
return self.publishFromHTML(uid, safe_unicode(reporthtml).encode('utf-8'));
def publishFromHTML(self, prouid, results_html):
uc = getToolByName(self.context, 'uid_catalog')
pros = uc(UID=prouid)
if not pros or len(pros) != 1:
return []
        pro = pros[0].getObject()
# HTML written to debug file
debug_mode = App.config.getConfiguration().debug_mode
if debug_mode:
tmp_fn = tempfile.mktemp(suffix=".html")
logger.debug("Writing HTML for %s to %s" % (pro.Title(), tmp_fn))
open(tmp_fn, "wb").write(results_html)
# Create the pdf report (will always be attached to the Order)
# we must supply the file ourself so that createPdf leaves it alone.
# This version replaces 'attachment' links; probably not required,
# so it's repeated below, without these localise_images.
# cleanup, results_html_for_pdf = self.localise_images(results_html)
# pdf_fn = tempfile.mktemp(suffix=".pdf")
# pdf_report = createPdf(htmlreport=results_html_for_pdf, outfile=pdf_fn)
# for fn in cleanup:
# os.remove(fn)
pdf_fn = tempfile.mktemp(suffix=".pdf")
pdf_report = createPdf(htmlreport=results_html, outfile=pdf_fn)
# PDF written to debug file
if debug_mode:
logger.debug("Writing PDF for %s to %s" % (pro.Title(), pdf_fn))
else:
os.remove(pdf_fn)
recipients = []
# Send report to supplier
supplier_data = self._supplier_data(pro)
title = encode_header(supplier_data.get('title', ''))
email = supplier_data.get('email')
formatted = formataddr((title, email))
# Create the new mime_msg object
mime_msg = MIMEMultipart('related')
mime_msg['Subject'] = self.get_mail_subject(pro)
"""
Edit this to change the From address
mime_msg['From'] = formataddr(
(encode_header(lab.getName()), lab.getEmailAddress()))
"""
mime_msg['From'] = formataddr(("BIKA IMM", "imm@bika.com"))
mime_msg.preamble = 'This is a multi-part MIME message.'
msg_txt = MIMEText(results_html, _subtype='html')
mime_msg.attach(msg_txt)
mime_msg['To'] = formatted
# Attach the pdf to the email if requested
if pdf_report:
attachPdf(mime_msg, pdf_report, pdf_fn)
msg_string = mime_msg.as_string()
# content of outgoing email written to debug file
if debug_mode:
tmp_fn = tempfile.mktemp(suffix=".email")
logger.debug("Writing MIME message for %s to %s" % (pro.Title(), tmp_fn))
open(tmp_fn, "wb").write(msg_string)
try:
host = getToolByName(pro, 'MailHost')
host.send(msg_string, immediate=True)
except SMTPServerDisconnected as msg:
logger.warn("SMTPServerDisconnected: %s." % msg)
except SMTPRecipientsRefused as msg:
raise WorkflowException(str(msg))
pro.setDateDispatched(DateTime())
return [pro]
def publish(self):
""" Publish the Order. Generates a results pdf file
associated, sends an email with the report to
the lab manager and sends a notification (usually an email
with the PDF attached) to the Supplier's contact and CCs.
"""
if len(self._products) > 1:
published_products = []
for pro in self._products:
propub = OrderPublishView(pro, self.request, publish=True)
pro = propub.publish()
published_products.extend(pro)
published_products = [ppro.id for ppro in published_products]
return published_products
results_html = safe_unicode(self.template()).encode('utf-8')
        # publishFromHTML expects the product UID as its first argument
        # (assuming the context provides the standard Plone UID() accessor)
        return self.publishFromHTML(self.context.UID(), results_html)
def get_mail_subject(self, ar):
""" Returns the email subject
"""
supplier = ar.aq_parent
subject ="Order Details: %s" % (ar.getDateDispatched())
return subject
|
import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
tooltip = html.Div(
[
make_subheading("Tooltip", "tooltip"),
html.P(
[
"I wonder what ",
html.Span(
"floccinaucinihilipilification", id="tooltip-target"
),
" means?",
]
),
dbc.Tooltip(
"Noun: rare, "
"the action or habit of estimating something as worthless.",
target="tooltip-target",
),
],
className="mb-4",
)
|
# encoding: utf-8
import logging
import torch
import torch.nn as nn
from torch.nn import DataParallel
# from engine.data_parallel import DataParallel
# #self create dataparallel for unbalance GPU memory size
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint, Timer,global_step_from_engine
from ignite.metrics import RunningAverage
from ignite.contrib.handlers.tqdm_logger import ProgressBar
from utils.reid_metric import R1_mAP
def create_supervised_trainer(model, optimizer, loss_fn,
device=None):
if device:
if torch.cuda.device_count() > 1:
model = DataParallel(model)
model.to(device)
def _update(engine, batch):
model.train()
optimizer.zero_grad()
img, target = batch
img = img.to(device) if torch.cuda.device_count() >= 1 else img
target = target.to(device) if torch.cuda.device_count() >= 1 else target
score, feat = model(img)
loss,loss_dict = loss_fn(score, feat, target)
loss.backward()
optimizer.step()
# compute acc
acc = (score.max(1)[1] == target).float().mean()
loss_dict['loss'] = loss.item()
return acc.item(),loss_dict
return Engine(_update)
def create_supervised_trainer_with_mask(model, optimizer, loss_fn,
device=None):
if device:
if torch.cuda.device_count() > 1:
model = DataParallel(model)
model.to(device)
def _update(engine, batch):
model.train()
optimizer.zero_grad()
img, target ,masks = batch
img = img.to(device) if torch.cuda.device_count() >= 1 else img
target = target.to(device) if torch.cuda.device_count() >= 1 else target
score, feat = model(img,masks)
loss,loss_dict = loss_fn(score, feat, target)
loss.backward()
optimizer.step()
# compute acc
acc = (score.max(1)[1] == target).float().mean()
loss_dict['loss'] = loss.item()
return acc.item(),loss_dict
return Engine(_update)
def create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn, center_loss_weight,
device=None):
if device:
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model.to(device)
def _update(engine, batch):
model.train()
optimizer.zero_grad()
optimizer_center.zero_grad()
img, target = batch
img = img.to(device) if torch.cuda.device_count() >= 1 else img
target = target.to(device) if torch.cuda.device_count() >= 1 else target
score, feat = model(img)
loss,loss_dict = loss_fn(score, feat, target)
# print("Total loss is {}, center loss is {}".format(loss, center_criterion(feat, target)))
loss.backward()
optimizer.step()
for param in center_criterion.parameters():
            param.grad.data *= (1. / center_loss_weight)
optimizer_center.step()
# compute acc
acc = (score.max(1)[1] == target).float().mean()
loss_dict['loss'] = loss.item()
return acc.item(),loss_dict
return Engine(_update)
# +
def create_supervised_evaluator(model, metrics,
device=None):
if device:
# if torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
data, pids, camids = batch
data = data.to(device) if torch.cuda.device_count() >= 1 else data
feat = model(data)
return feat, pids, camids
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine
def create_supervised_evaluator_with_mask(model, metrics,
device=None):
if device:
# if torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
data, pids, camids ,masks = batch
data = data.to(device) if torch.cuda.device_count() >= 1 else data
feat = model(data,masks)
return feat, pids, camids
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine
def create_supervised_evaluator_with_mask_new_eval(model, metrics,
device=None):
if device:
# if torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
data, pids, ambi, camids ,masks = batch
data = data.to(device) if torch.cuda.device_count() >= 1 else data
feat = model(data,masks)
return feat, pids, ambi, camids
engine = Engine(_inference)
for name, metric in metrics.items():
metric.attach(engine, name)
return engine
# -
def do_train(
cfg,
model,
train_loader,
val_loader,
optimizer,
scheduler,
loss_fn,
num_query,
start_epoch
):
# checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
eval_period = cfg.SOLVER.EVAL_PERIOD
output_dir = cfg.OUTPUT_DIR
device = cfg.MODEL.DEVICE
epochs = cfg.SOLVER.MAX_EPOCHS
logger = logging.getLogger("reid_baseline.train")
logger.info("Start training")
# Create 1. trainer 2. evaluator 3. checkpointer 4. timer 5. pbar
if len(train_loader.dataset.dataset[0]) == 4 : #train with mask
trainer = create_supervised_trainer_with_mask(model, optimizer, loss_fn, device=device)
        if not cfg.TEST.NEW_EVAL:
evaluator = create_supervised_evaluator_with_mask(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)
else:
evaluator = create_supervised_evaluator_with_mask_new_eval(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM,new_eval=True)}, device=device)
else: # no mask
trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)
        if not cfg.TEST.NEW_EVAL:
evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)
else:
raise NotImplementedError
checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, n_saved=1, require_empty=False,\
score_function=lambda x : x.state.metrics['r1_mAP'][1],\
global_step_transform=global_step_from_engine(trainer))
timer = Timer(average=True)
tpbar = ProgressBar(persist=True,ncols=120)
epbar = ProgressBar(persist=True,ncols=120)
#############################################################
evaluator.add_event_handler(Events.EPOCH_COMPLETED(every=1), checkpointer, \
{'model': model,'optimizer': optimizer})
timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
tpbar.attach(trainer)
epbar.attach(evaluator)
# average metric to attach on trainer
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_acc')
RunningAverage(output_transform=lambda x: x[1]['loss']).attach(trainer, 'avg_loss')
RunningAverage(output_transform=lambda x: x[1]['triplet']).attach(trainer, 'avg_trip')
@trainer.on(Events.STARTED)
def start_training(engine):
engine.state.epoch = start_epoch
@trainer.on(Events.EPOCH_COMPLETED)
def adjust_learning_rate(engine):
# if engine.state.epoch == 1:
# scheduler.step()
scheduler.step()
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def print_times(engine):
logger.info('Epoch {} done. Total Loss : {:.3f}, Triplet Loss : {:.3f}, Acc : {:.3f}, Base Lr : {:.2e}'
.format(engine.state.epoch, engine.state.metrics['avg_loss'],engine.state.metrics['avg_trip'],
engine.state.metrics['avg_acc'],scheduler.get_last_lr()[0]))
timer.reset()
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
if engine.state.epoch % eval_period == 0:
# evaluator.state.epoch = trainer.state.epoch
evaluator.run(val_loader)
cmc, mAP = evaluator.state.metrics['r1_mAP']
logger.info("Validation Results - Epoch: {}".format(engine.state.epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
trainer.run(train_loader, max_epochs=epochs)
def do_train_with_center(
cfg,
model,
center_criterion,
train_loader,
val_loader,
optimizer,
optimizer_center,
scheduler,
loss_fn,
num_query,
start_epoch
):
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD  # used by the checkpointer below
eval_period = cfg.SOLVER.EVAL_PERIOD
output_dir = cfg.OUTPUT_DIR
device = cfg.MODEL.DEVICE
epochs = cfg.SOLVER.MAX_EPOCHS
logger = logging.getLogger("reid_baseline.train")
logger.info("Start training")
trainer = create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn, cfg.SOLVER.CENTER_LOSS_WEIGHT, device=device)
evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)
checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, None, n_saved=10, require_empty=False)
timer = Timer(average=True)
pbar = ProgressBar(persist=True,ncols=120)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=checkpoint_period), checkpointer, {'model': model,
'optimizer': optimizer,
'center_param': center_criterion,
'optimizer_center': optimizer_center})
timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
pbar.attach(trainer)
# average metric to attach on trainer
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_acc')
    RunningAverage(output_transform=lambda x: x[1]['loss']).attach(trainer, 'avg_loss')
RunningAverage(output_transform=lambda x: x[1]['triplet']).attach(trainer, 'avg_trip')
RunningAverage(output_transform=lambda x: x[1]['center']).attach(trainer, 'avg_center')
@trainer.on(Events.STARTED)
def start_training(engine):
engine.state.epoch = start_epoch
@trainer.on(Events.EPOCH_COMPLETED)
def adjust_learning_rate(engine):
scheduler.step()
# adding handlers using `trainer.on` decorator API
@trainer.on(Events.EPOCH_COMPLETED)
def print_times(engine):
        logger.info('Epoch {} done. Total Loss : {:.3f}, Triplet Loss : {:.3f}, Center Loss : {:.3f}, Acc : {:.3f}, Base Lr : {:.2e}'
                    .format(engine.state.epoch, engine.state.metrics['avg_loss'], engine.state.metrics['avg_trip'],
                            engine.state.metrics['avg_center'], engine.state.metrics['avg_acc'], scheduler.get_last_lr()[0]))
timer.reset()
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
if engine.state.epoch % eval_period == 0:
evaluator.run(val_loader)
cmc, mAP = evaluator.state.metrics['r1_mAP']
logger.info("Validation Results - Epoch: {}".format(engine.state.epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
trainer.run(train_loader, max_epochs=epochs)
|
"""Unit tests for the configuration variables"""
from ..src import config as cf
import numpy as np
def test_backendport():
assert isinstance(cf.BACKEND_PORT, int)
def test_host():
assert isinstance(cf.HOST, str)
def test_model():
assert isinstance(cf.MODEL, str)
def test_model_url():
assert isinstance(cf.MODEL_URL, str)
def test_version():
assert isinstance(cf.VERSION, str)
def test_codes():
assert isinstance(cf.CODES, np.ndarray)
def test_input_size():
assert isinstance(cf.INPUT_SIZE, tuple)
def test_batch_size():
assert isinstance(cf.BATCH_SIZE, int)
def test_freeze_layer():
assert isinstance(cf.FREEZE_LAYER, int)
def test_epochs():
assert isinstance(cf.EPOCHS, int)
def test_learning_rate():
assert isinstance(cf.LEARNING_RATE, slice)
def test_weight_decay():
assert isinstance(cf.WEIGHT_DECAY, float)
def test_save_model():
assert isinstance(cf.SAVE_MODEL, str)
|
import os
import numpy as np
import torch
import random
import time
import numba as nb
import yaml
import pickle
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset, Sampler, random_split
from tqdm import tqdm
from scipy import stats as s
class SemanticKittiModule(LightningDataModule):
def __init__(self, cfg, verbose=True):
super().__init__()
self.cfg = cfg
self.verbose = verbose
def prepare_data(self):
pass
def setup(self, stage=None):
train_split = 'train'
if 'SAVE_FEATURES' in self.cfg.keys():
val_split = 'train'
print("using train split as val split")
else:
val_split = 'valid'
test_split = 'test'
########## Point dataset splits
train_pt_dataset = SemanticKitti(
self.cfg.DATA_CONFIG.DATASET_PATH + '/sequences/',
split = train_split
)
val_pt_dataset = SemanticKitti(
self.cfg.DATA_CONFIG.DATASET_PATH + '/sequences/',
split = val_split
)
test_pt_dataset = SemanticKitti(
self.cfg.DATA_CONFIG.DATASET_PATH + '/sequences/',
split = test_split
)
########## Voxel spherical dataset splits
self.train_set = CylindricalSemanticKitti(
train_pt_dataset,
grid_size = self.cfg.DATA_CONFIG.DATALOADER.GRID_SIZE,
ignore_label = self.cfg.DATA_CONFIG.DATALOADER.CONVERT_IGNORE_LABEL,
fixed_volume_space = self.cfg.DATA_CONFIG.DATALOADER.FIXED_VOLUME_SPACE,
)
self.val_set = CylindricalSemanticKitti(
val_pt_dataset,
grid_size = self.cfg.DATA_CONFIG.DATALOADER.GRID_SIZE,
ignore_label = self.cfg.DATA_CONFIG.DATALOADER.CONVERT_IGNORE_LABEL,
fixed_volume_space = self.cfg.DATA_CONFIG.DATALOADER.FIXED_VOLUME_SPACE,
)
self.test_set = CylindricalSemanticKitti(
test_pt_dataset,
grid_size = self.cfg.DATA_CONFIG.DATALOADER.GRID_SIZE,
ignore_label = self.cfg.DATA_CONFIG.DATALOADER.CONVERT_IGNORE_LABEL,
fixed_volume_space = self.cfg.DATA_CONFIG.DATALOADER.FIXED_VOLUME_SPACE,
)
def train_dataloader(self):
self.train_loader = DataLoader(
dataset = self.train_set,
batch_size = self.cfg.EVAL.BATCH_SIZE,
collate_fn = collate_fn_BEV,
shuffle = self.cfg.DATA_CONFIG.DATALOADER.SHUFFLE,
num_workers = self.cfg.DATA_CONFIG.DATALOADER.NUM_WORKER,
pin_memory = True,
drop_last = False,
timeout = 0
)
self.train_iter = iter(self.train_loader)
return self.train_loader
def val_dataloader(self):
self.valid_loader = DataLoader(
dataset = self.val_set,
batch_size = self.cfg.EVAL.BATCH_SIZE,
collate_fn = collate_fn_BEV,
shuffle = False,
num_workers = self.cfg.DATA_CONFIG.DATALOADER.NUM_WORKER,
pin_memory = True,
drop_last = False,
timeout = 0
)
self.valid_iter = iter(self.valid_loader)
return self.valid_loader
def test_dataloader(self):
self.test_loader = DataLoader(
dataset = self.test_set,
batch_size = self.cfg.EVAL.BATCH_SIZE,
collate_fn = collate_fn_BEV,
shuffle = False,
num_workers = self.cfg.DATA_CONFIG.DATALOADER.NUM_WORKER,
pin_memory = True,
drop_last = False,
timeout = 0
)
self.test_iter = iter(self.test_loader)
return self.test_loader
class SemanticKitti(Dataset):
def __init__(self, data_path, split='train', seq=None):
with open("datasets/semantic-kitti.yaml", 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
SemKITTI_label_name = dict()
for i in sorted(list(semkittiyaml['learning_map'].keys()))[::-1]:
SemKITTI_label_name[semkittiyaml['learning_map'][i]] = semkittiyaml['labels'][i]
self.learning_map = semkittiyaml['learning_map']
self.split = split
split = semkittiyaml['split'][self.split]
self.im_idx = []
pose_files = []
calib_files = []
for i_folder in split:
self.im_idx += absoluteFilePaths('/'.join([data_path,str(i_folder).zfill(2),'velodyne']))
pose_files.append(absoluteDirPath(data_path+str(i_folder).zfill(2)+'/poses.txt'))
calib_files.append(absoluteDirPath(data_path+str(i_folder).zfill(2)+'/calib.txt'))
self.im_idx.sort()
self.poses = load_poses(pose_files, calib_files)
self.things = ['car', 'truck', 'bicycle', 'motorcycle', 'bus', 'person',
'bicyclist', 'motorcyclist']
self.stuff = ['road', 'sidewalk', 'parking', 'other-ground', 'building',
'vegetation', 'trunk', 'terrain', 'fence', 'pole', 'traffic-sign']
self.things_ids = []
for i in sorted(list(semkittiyaml['labels'].keys())):
if SemKITTI_label_name[semkittiyaml['learning_map'][i]] in self.things:
self.things_ids.append(i)
def __len__(self):
return len(self.im_idx)
def __getitem__(self, index):
raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
if self.split == 'test':
annotated_data = np.expand_dims(np.zeros_like(raw_data[:,0],dtype=int),axis=1)
sem_labels = annotated_data
ins_labels = annotated_data
valid = annotated_data
else:
annotated_data = np.fromfile(self.im_idx[index].replace('velodyne','labels')[:-3]+'label', dtype=np.int32).reshape((-1,1))
            sem_labels = annotated_data & 0xFFFF  # keep the lower 16 bits (semantic label); the upper 16 bits hold the instance id
ins_labels = annotated_data
            valid = np.isin(sem_labels, self.things_ids).reshape(-1)  # mask of points that belong to 'thing' classes
sem_labels = np.vectorize(self.learning_map.__getitem__)(sem_labels)
data_tuple = (raw_data[:,:3], sem_labels.astype(np.uint8))
data_tuple += (raw_data[:,3],)#ref
data_tuple += (ins_labels, valid)#ins ids
data_tuple += (self.im_idx[index],)#filename
data_tuple += (self.poses[index],)#pose
return data_tuple
class CylindricalSemanticKitti(Dataset):
def __init__(self, in_dataset, grid_size, min_rad=-np.pi/4, max_rad=np.pi/4,
ignore_label = 255, fixed_volume_space= False,
max_volume_space = [50,np.pi,1.5], min_volume_space = [3,-np.pi,-3],
center_type='Axis_center'):
'Initialization'
self.point_cloud_dataset = in_dataset
self.grid_size = np.asarray(grid_size)
self.ignore_label = ignore_label
self.fixed_volume_space = fixed_volume_space
self.max_volume_space = max_volume_space
self.min_volume_space = min_volume_space
self.center_type = center_type
def __len__(self):
return len(self.point_cloud_dataset)
def __getitem__(self, index):
data = self.point_cloud_dataset[index]
if len(data) == 6:
xyz,labels,sig,ins_labels,valid,pcd_fname = data
if len(sig.shape) == 2: sig = np.squeeze(sig)
elif len(data) == 7:
xyz,labels,sig,ins_labels,valid,pcd_fname,pose = data
if len(sig.shape) == 2: sig = np.squeeze(sig)
        else: raise Exception('Returned an invalid data tuple')
# convert coordinate into polar coordinates
xyz_pol = cart2polar(xyz)
max_bound_r = np.percentile(xyz_pol[:,0],100,axis = 0)
min_bound_r = np.percentile(xyz_pol[:,0],0,axis = 0)
max_bound = np.max(xyz_pol[:,1:],axis = 0)
min_bound = np.min(xyz_pol[:,1:],axis = 0)
max_bound = np.concatenate(([max_bound_r],max_bound))
min_bound = np.concatenate(([min_bound_r],min_bound))
if self.fixed_volume_space:
max_bound = np.asarray(self.max_volume_space)
min_bound = np.asarray(self.min_volume_space)
# get grid index
crop_range = max_bound - min_bound
cur_grid_size = self.grid_size
        intervals = crop_range/(cur_grid_size-1)  # dividing by (size - 1) yields grid indices that start at 0
        if (intervals == 0).any(): print("Zero interval!")
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)  # point-wise grid index
# process voxel position
voxel_position = np.zeros(self.grid_size,dtype = np.float32)
dim_array = np.ones(len(self.grid_size)+1,int)
dim_array[0] = -1
voxel_position = np.indices(self.grid_size)*intervals.reshape(dim_array) + min_bound.reshape(dim_array)
voxel_position = polar2cat(voxel_position)
# process valid voxel labels
valid_voxels, vox_to_point = np.unique(grid_ind, return_inverse=True, axis=0)
voxel_labels = np.ones(valid_voxels.shape[0],dtype = np.uint8)*self.ignore_label
label_voxel_pair = np.concatenate([grid_ind,labels],axis = 1)
label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:,2],grid_ind[:,1],grid_ind[:,0])),:] #same order as coordinates to create sparse tensor when using np.unique
voxel_labels = nb_get_voxel_labels(np.copy(voxel_labels),label_voxel_pair) #get valid voxel labels
voxel_labels = voxel_labels.reshape(1,voxel_labels.shape[0]) #add batch dimension
data_tuple = (voxel_position,voxel_labels)
# center data on each voxel for PTnet
voxel_centers = (grid_ind.astype(np.float32) + 0.5)*intervals + min_bound
return_xyz = xyz_pol - voxel_centers
return_xyz = np.concatenate((return_xyz,xyz_pol,xyz[:,:2]),axis = 1)
#point features
return_fea = np.concatenate((return_xyz,sig[...,np.newaxis]),axis = 1)
# (grid-wise coor, grid-wise sem label, point-wise grid index, indices voxel to point, point-wise sem label, [relative polar coor(3), polar coor(3), cat coor(2), ref signal(1)])
data_tuple += (grid_ind, vox_to_point,labels,return_fea)
offsets = np.zeros([xyz.shape[0], 3], dtype=np.float32)
offsets = nb_aggregate_pointwise_center_offset(offsets, xyz, ins_labels, self.center_type)
if len(data) == 6:
data_tuple += (ins_labels, offsets, valid, xyz, pcd_fname) # plus (point-wise instance label, point-wise center offset)
if len(data) == 7:
data_tuple += (ins_labels, offsets, valid, xyz, pcd_fname, pose) # plus (point-wise instance label, point-wise center offset)
return data_tuple
def calc_xyz_middle(xyz):
return np.array([
(np.max(xyz[:, 0]) + np.min(xyz[:, 0])) / 2.0,
(np.max(xyz[:, 1]) + np.min(xyz[:, 1])) / 2.0,
(np.max(xyz[:, 2]) + np.min(xyz[:, 2])) / 2.0
], dtype=np.float32)
things_ids = set([10, 11, 13, 15, 16, 18, 20, 30, 31, 32, 252, 253, 254, 255, 256, 257, 258, 259])
# @nb.jit
def nb_aggregate_pointwise_center_offset(offsets, xyz, ins_labels, center_type):
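    # For every point belonging to a 'thing' instance, store the offset from the
    # point to that instance's center (bounding-box center or mass center,
    # depending on center_type); points of 'stuff' classes keep a zero offset.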
# ins_num = np.max(ins_labels) + 1
# for i in range(1, ins_num):
for i in np.unique(ins_labels):
if (i & 0xFFFF) not in things_ids:
continue
i_indices = (ins_labels == i).reshape(-1)
xyz_i = xyz[i_indices]
if xyz_i.shape[0] <= 0:
continue
if center_type == 'Axis_center':
mean_xyz = calc_xyz_middle(xyz_i)
elif center_type == 'Mass_center':
mean_xyz = np.mean(xyz_i, axis=0)
offsets[i_indices] = mean_xyz - xyz_i
return offsets
@nb.jit('u1[:](u1[:],i8[:,:])',nopython=True,cache=True,parallel = False)
def nb_get_voxel_labels(voxel_labels,sorted_label_voxel_pair):
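    # Majority vote: walk the (lexicographically sorted) point->voxel pairs and
    # assign each voxel the most frequent semantic label among its points.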
label_size = 256
counter = np.zeros((label_size,),dtype = np.uint16)
counter[sorted_label_voxel_pair[0,3]] = 1
cur_sear_ind = sorted_label_voxel_pair[0,:3]
voxel_ind = 0
for i in range(1,sorted_label_voxel_pair.shape[0]):
cur_ind = sorted_label_voxel_pair[i,:3]
if not np.all(np.equal(cur_ind,cur_sear_ind)):
voxel_labels[voxel_ind] = np.argmax(counter)
counter = np.zeros((label_size,),dtype = np.uint16)
cur_sear_ind = cur_ind
voxel_ind += 1
counter[sorted_label_voxel_pair[i,3]] += 1
voxel_labels[voxel_ind] = np.argmax(counter)
return voxel_labels
def collate_fn_BEV(data): # stack along batch dimension
data2stack=np.stack([d[0] for d in data]).astype(np.float32) # grid-wise coor
label2stack = [d[1] for d in data] # grid-wise sem label
grid_ind_stack = [d[2] for d in data] # point-wise grid index
vox_to_point = [d[3] for d in data] # indices mapping voxel-to-point
point_label = [d[4] for d in data] # point-wise sem label
xyz = [d[5] for d in data] # point-wise coor
pt_ins_labels = [d[6] for d in data] # point-wise instance label
pt_offsets = [d[7] for d in data] # point-wise center offset
pt_valid = [d[8] for d in data] # point-wise indicator for foreground points
pt_cart_xyz = [d[9] for d in data] # point-wise cart coor
filename = [d[10] for d in data] # scan filename
pose = [d[11] for d in data] # pose of the scan
return {
'vox_coor': torch.from_numpy(data2stack),
'vox_labels': label2stack,
'grid': grid_ind_stack,
'vox2point_idx' : vox_to_point,
'pt_labs': point_label,
'pt_fea': xyz,
'pt_ins_labels': pt_ins_labels,
'pt_offsets': pt_offsets,
'pt_valid': pt_valid,
'pt_cart_xyz': pt_cart_xyz,
'pcd_fname': filename,
'pose': pose
}
# Transformations between Cartesian and Polar coordinates
def cart2polar(input_xyz):
rho = np.sqrt(input_xyz[:,0]**2 + input_xyz[:,1]**2)
phi = np.arctan2(input_xyz[:,1],input_xyz[:,0])
return np.stack((rho,phi,input_xyz[:,2]),axis=1)
def polar2cat(input_xyz_polar):
x = input_xyz_polar[0]*np.cos(input_xyz_polar[1])
y = input_xyz_polar[0]*np.sin(input_xyz_polar[1])
return np.stack((x,y,input_xyz_polar[2]),axis=0)
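# Illustrative sanity check (added sketch, not part of the original pipeline):
# converting Cartesian points to polar coordinates and back should recover the
# input up to floating-point error. Note that polar2cat() expects the coordinate
# axis first, hence the transposes.
def _demo_polar_roundtrip():
    pts = np.array([[1.0, 0.0, 0.5], [0.0, 2.0, -1.0]], dtype=np.float32)
    assert np.allclose(polar2cat(cart2polar(pts).T).T, pts, atol=1e-5)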
def absoluteFilePaths(directory):
for dirpath,_,filenames in os.walk(directory):
for f in filenames:
yield os.path.abspath(os.path.join(dirpath, f))
def absoluteDirPath(directory):
return os.path.abspath(directory)
def parse_calibration(filename):
calib = {}
calib_file = open(filename)
for line in calib_file:
key, content = line.strip().split(":")
values = [float(v) for v in content.strip().split()]
pose = np.zeros((4, 4))
pose[0, 0:4] = values[0:4]
pose[1, 0:4] = values[4:8]
pose[2, 0:4] = values[8:12]
pose[3, 3] = 1.0
calib[key] = pose
calib_file.close()
return calib
def parse_poses(filename, calibration):
file = open(filename)
poses = []
Tr = calibration["Tr"]
Tr_inv = np.linalg.inv(Tr)
for line in file:
values = [float(v) for v in line.strip().split()]
pose = np.zeros((4, 4))
pose[0, 0:4] = values[0:4]
pose[1, 0:4] = values[4:8]
pose[2, 0:4] = values[8:12]
pose[3, 3] = 1.0
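        # convert the pose (given in the camera frame in SemanticKITTI) into the
        # LiDAR frame using the Tr calibration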
poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr)))
return poses
def load_poses(pose_files, calib_files):
poses = []
for i in range(len(pose_files)):
calib = parse_calibration(calib_files[i])
seq_poses_f64 = parse_poses(pose_files[i], calib)
seq_poses = ([pose.astype(np.float32) for pose in seq_poses_f64])
poses += seq_poses
return poses
|
import numpy as np
import cv2
import utils_box3d
import time
import yaml
image_orig=cv2.imread('kitti.png')
yaw = 0.019480967695382434 #+ np.pi/2
dims = np.array([1.48773544, 1.59376032, 3.74524751])
box_2D = np.array([767., 176., 1084., 357.], dtype=np.float64)
P = np.array([[7.215377000000e+02, 0.000000000000e+00, 6.095593000000e+02, 4.485728000000e+01],
[0.000000000000e+00, 7.215377000000e+02, 1.728540000000e+02, 2.163791000000e-01],
[0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 2.745884000000e-03]])
inds_right_0_89 = []
inds_right_90_179 = []
inds_right_180_269 = []
inds_right_270_359 = []
inds_left_0_89 = []
inds_left_90_179 = []
inds_left_180_269 = []
inds_left_270_359 = []
# theta
box_2D_orig = box_2D.copy()
for offset in [0, -150]:
box_2D[0] = box_2D_orig[0] + offset
box_2D[2] = box_2D_orig[2] + offset
for yaw in range(0, 89, 2):
yaw = yaw * (np.pi/180.0)
points2D = utils_box3d.gen_3D_box(yaw, dims, P, box_2D)
for i in utils_box3d.inds_used:
new = str(i[0])+str(i[1])+str(i[2])+str(i[3])
inds_right_0_89.append(new)
utils_box3d.inds_used = []
for yaw in range(90, 179, 2):
yaw = yaw * (np.pi/180.0)
points2D = utils_box3d.gen_3D_box(yaw, dims, P, box_2D)
for i in utils_box3d.inds_used:
new = str(i[0])+str(i[1])+str(i[2])+str(i[3])
inds_right_90_179.append(new)
utils_box3d.inds_used = []
for yaw in range(180, 269, 2):
yaw = yaw * (np.pi/180.0)
points2D = utils_box3d.gen_3D_box(yaw, dims, P, box_2D)
for i in utils_box3d.inds_used:
new = str(i[0])+str(i[1])+str(i[2])+str(i[3])
inds_right_180_269.append(new)
utils_box3d.inds_used = []
for yaw in range(270, 359, 2):
yaw = yaw * (np.pi/180.0)
points2D = utils_box3d.gen_3D_box(yaw, dims, P, box_2D)
for i in utils_box3d.inds_used:
new = str(i[0])+str(i[1])+str(i[2])+str(i[3])
inds_right_270_359.append(new)
utils_box3d.inds_used = []
for offset in [-500, -750]:
box_2D[0] = box_2D_orig[0] + offset
box_2D[2] = box_2D_orig[2] + offset
for yaw in range(0, 89, 2):
yaw = yaw * (np.pi/180.0)
points2D = utils_box3d.gen_3D_box(yaw, dims, P, box_2D)
for i in utils_box3d.inds_used:
new = str(i[0])+str(i[1])+str(i[2])+str(i[3])
inds_left_0_89.append(new)
utils_box3d.inds_used = []
for yaw in range(90, 179, 2):
yaw = yaw * (np.pi/180.0)
points2D = utils_box3d.gen_3D_box(yaw, dims, P, box_2D)
for i in utils_box3d.inds_used:
new = str(i[0])+str(i[1])+str(i[2])+str(i[3])
inds_left_90_179.append(new)
utils_box3d.inds_used = []
for yaw in range(180, 269, 2):
yaw = yaw * (np.pi/180.0)
points2D = utils_box3d.gen_3D_box(yaw, dims, P, box_2D)
for i in utils_box3d.inds_used:
new = str(i[0])+str(i[1])+str(i[2])+str(i[3])
inds_left_180_269.append(new)
utils_box3d.inds_used = []
for yaw in range(270, 359, 2):
yaw = yaw * (np.pi/180.0)
points2D = utils_box3d.gen_3D_box(yaw, dims, P, box_2D)
for i in utils_box3d.inds_used:
new = str(i[0])+str(i[1])+str(i[2])+str(i[3])
inds_left_270_359.append(new)
utils_box3d.inds_used = []
d={}
d["r0"] = set(inds_right_0_89)
d["r90"] = set(inds_right_90_179)
d["r180"] = set(inds_right_180_269)
d["r270"] = set(inds_right_270_359)
d["l0"] = set(inds_left_0_89)
d["l90"] = set(inds_left_90_179)
d["l180"] = set(inds_left_180_269)
d["l270"] = set(inds_left_270_359)
with open('result.yml', 'w') as yaml_file:
yaml.dump(d, yaml_file, default_flow_style=False)
|
"Implementation of test-runner for nose2 tests."
import os
import traceback
import nose2
from cosmic_ray.testing.test_runner import TestRunner
from cosmic_ray.util import redirect_stdout, redirect_stderr
class Nose2ResultsCollector(object):
"Nose plugin that collects results for later analysis."
def __init__(self):
self.events = []
def testOutcome(self, event): # pylint: disable=invalid-name
"Store result."
self.events.append(event)
class Nose2Runner(TestRunner): # pylint: disable=too-few-public-methods
"""A TestRunner using nose2.
This treats `test_args` as a list of arguments to `nose2.discover()`. The args
are passed directly to that function. See nose2's command line reference
for a description of what arguments are accepted.
NOTE: ``-s`` is not accepted here!
"""
def _run(self):
argv = ['']
argv += self.test_args.split()
collector = Nose2ResultsCollector()
with open(os.devnull, 'w') as devnull:
with redirect_stdout(devnull):
with redirect_stderr(devnull):
nose2.discover(argv=argv, extraHooks=[('testOutcome', collector)], exit=False)
failures = [x for x in collector.events if x.outcome != 'passed']
return (not failures, [(str(r.test), traceback.format_exception(*r.exc_info)) for r in failures])
|
import github3
import pytest
from tests.utils import BaseCase, load
from unittest import TestCase
class TestEvent(BaseCase):
def __init__(self, methodName='runTest'):
super(TestEvent, self).__init__(methodName)
self.ev = github3.events.Event(load('event'))
self.o = load('org')
def setUp(self):
super(TestEvent, self).setUp()
self.ev = github3.events.Event(self.ev.to_json())
def test_equality(self):
e = github3.events.Event(load('event'))
assert self.ev == e
e._uniq = 1
assert self.ev != e
def test_org(self):
json = self.ev.to_json().copy()
json['org'] = self.o
ev = github3.events.Event(json)
assert isinstance(ev.org, github3.orgs.Organization)
def test_repr(self):
assert repr(self.ev).startswith('<Event')
def test_list_types(self):
Event, handlers = (github3.events.Event,
github3.events._payload_handlers)
assert Event.list_types() == sorted(handlers.keys())
def test_is_public(self):
assert self.ev.is_public() == self.ev.public
class TestPayloadHandlers(TestCase):
def test_commitcomment(self):
comment = {'comment': load('repo_comment')}
comment = github3.events._commitcomment(comment)
assert isinstance(comment['comment'],
github3.repos.comment.RepoComment)
def test_follow(self):
f = {'target': load('user')}
github3.events._follow(f)
assert isinstance(f['target'], github3.users.User)
def test_forkev(self):
f = {'forkee': load('repo')}
github3.events._forkev(f)
assert isinstance(f['forkee'], github3.repos.Repository)
def test_gist(self):
g = {'gist': load('gist')}
github3.events._gist(g)
assert isinstance(g['gist'], github3.gists.Gist)
def test_issuecomm(self):
c = {'issue': load('issue'), 'comment': load('issue_comment')}
github3.events._issuecomm(c)
assert isinstance(c['issue'], github3.issues.Issue)
assert isinstance(c['comment'], github3.issues.comment.IssueComment)
def test_issueevent(self):
c = {'issue': load('issue')}
github3.events._issueevent(c)
assert isinstance(c['issue'], github3.issues.Issue)
def test_member(self):
m = {'member': load('user')}
github3.events._member(m)
assert isinstance(m['member'], github3.users.User)
def test_pullreqev(self):
p = {'pull_request': load('pull')}
github3.events._pullreqev(p)
assert isinstance(p['pull_request'], github3.pulls.PullRequest)
def test_pullreqcomm(self):
p = {'comment': load('review_comment')}
github3.events._pullreqcomm(p)
assert isinstance(p['comment'], github3.pulls.ReviewComment)
@pytest.mark.xfail
    def test_team(self):
t = {'team': load('team'), 'repo': load('repo'), 'user': load('user')}
github3.events._team(t)
assert isinstance(t['team'], github3.orgs.Team)
assert isinstance(t['repo'], github3.repos.Repository)
assert isinstance(t['user'], github3.users.User)
|
from dataclasses import dataclass
@dataclass
class AdapterConfig(object):
"""Implements the adapter configuration proposed by Houlsby et. al, 2019
in https://arxiv.org/abs/1902.00751."""
add_layer_norm_before_adapter: bool = False
add_layer_norm_after_adapter: bool = False
non_linearity: str = "gelu_new"
reduction_factor: int = 16
weight_init_range = 1e-2
# Whether to use conditional layer norms for adapters.
conditional_layer_norm = False
hidden_dim = 128
    # Whether to add adapter blocks; this is used when we only need
    # to tune the layer norms.
train_adapters_blocks = True
task_adapter_layers_encoder = None
task_adapter_layers_decoder = None
task_adapter_in_decoder = True
intrinsic_dim = 100
normalize_intrinsic_projections = False
# This can be either random, or fastfood.
intrinsic_projection = "random"
# Hypercomplex adapters parameters
hypercomplex_adapters = False
hypercomplex_division = 8
learn_phm = True
    hypercomplex_nonlinearity = "glorot-uniform"
shared_phm_rule = False
factorized_phm = False
shared_W_phm = False
factorized_phm_rule = False
phm_c_init = "normal"
phm_rank = 1
    phm_init_range = 0.01
# prefix-tuning parameters.
prefix_dim = 100
init_prefix_from_vocab = False
kronecker_prod = False
# BitFit configuration.
bitfit = False
# Low-rank adapters.
low_rank_adapters = False
low_rank_w_init = "glorot-uniform"
low_rank_rank = 1
# whether using single adapter for all tasks
use_single_adapter = True
class MetaAdapterConfig(AdapterConfig):
"""Implements Meta adapter in which a hyper-network generates the parameters of
adapter layers. In this case we have a task embeddings which is feed to the
hyper-network to allow it generate the weights for the adapter layers."""
task_embedding_dim = 512
task_embedding_dir = None
hidden_dim = 128
train_task_embeddings = False
non_linearity: str = "gelu_new"
projected_task_embedding_dim = 64
task_hidden_dim = 128
parametric_task_embedding = False
    # If specified, uses one hypernet to generate the adapter weights.
unique_hyper_net = True
unique_hyper_net_layer_norm = True
    # We consider only one hyper-net for all the transformer blocks.
efficient_unique_hyper_net = False
    task_to_embeddings = None
@dataclass
class CompactorConfig(object):
add_layer_norm_before_adapter: bool = False
add_layer_norm_after_adapter: bool = False
non_linearity: str = "gelu_new"
reduction_factor: int = 16
weight_init_range = 1e-2
# Whether to use conditional layer norms for adapters.
hidden_dim = 128
    # Whether to add adapter blocks; this is used when we only need
    # to tune the layer norms.
task_adapter_layers_encoder = None
task_adapter_layers_decoder = None
task_adapter_in_decoder = True
intrinsic_dim = 100
normalize_intrinsic_projections = False
# This can be either random, or fastfood.
intrinsic_projection = "random"
# Hypercomplex adapters parameters
hypercomplex_adapters = True
hypercomplex_division = 4
train_task_adapters = True
learn_phm = True
    hypercomplex_nonlinearity = "glorot-uniform"
shared_phm_rule = True
factorized_phm = True
shared_W_phm = False
factorized_phm_rule = False
phm_c_init = "normal"
phm_rank = 1
    phm_init_range = 0.0001
# prefix-tuning parameters.
prefix_dim = 100
init_prefix_from_vocab = False
kronecker_prod = False
# BitFit configuration.
bitfit = False
# Low-rank adapters.
low_rank_adapters = False
low_rank_w_init = "glorot-uniform"
low_rank_rank = 1
# whether using single adapter for all tasks
use_single_adapter = False
@dataclass
class LRAdapterConfig(object):
add_layer_norm_before_adapter: bool = False
add_layer_norm_after_adapter: bool = False
non_linearity: str = "gelu_new"
reduction_factor: int = 16
weight_init_range = 1e-2
# Whether to use conditional layer norms for adapters.
hidden_dim = 128
    # Whether to add adapter blocks; this is used when we only need
    # to tune the layer norms.
task_adapter_layers_encoder = None
task_adapter_layers_decoder = None
task_adapter_in_decoder = True
intrinsic_dim = 100
normalize_intrinsic_projections = False
# This can be either random, or fastfood.
intrinsic_projection = "random"
# Hypercomplex adapters parameters
hypercomplex_adapters = False
hypercomplex_division = 4
train_task_adapters = True
learn_phm = True
    hypercomplex_nonlinearity = "glorot-uniform"
shared_phm_rule = True
factorized_phm = True
shared_W_phm = False
factorized_phm_rule = False
phm_c_init = "normal"
phm_rank = 1
    phm_init_range = 0.0001
# prefix-tuning parameters.
prefix_dim = 100
init_prefix_from_vocab = False
kronecker_prod = False
# BitFit configuration.
bitfit = False
# Low-rank adapters.
low_rank_adapters = True
low_rank_w_init = "glorot-uniform"
low_rank_rank = 1
# whether using single adapter for all tasks
use_single_adapter = False
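

# --- Hedged illustration (not part of the original configs) ---
# A minimal sketch of how a Houlsby-style bottleneck adapter could be sized
# from an AdapterConfig. The project's real adapter modules live elsewhere and
# may differ; `build_adapter` and `input_dim` are illustrative names, and torch
# is assumed to be available since these configs parameterise PyTorch modules.
import torch.nn as nn


def build_adapter(input_dim: int, config: AdapterConfig) -> nn.Module:
    # Down-project to input_dim // reduction_factor, apply a non-linearity,
    # then project back up (e.g. 768 // 16 = 48 hidden units).
    down_dim = input_dim // config.reduction_factor
    return nn.Sequential(
        nn.Linear(input_dim, down_dim),
        nn.GELU(),  # stand-in for config.non_linearity ("gelu_new")
        nn.Linear(down_dim, input_dim),
    )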
|
"""
__author__ = HackPrinceton 2017 Best Team
__description__ = Initializes files for processing module
"""
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import unittest
import threading
import time
from fedlearner.common import etcd_client
class TestEtcdClient(unittest.TestCase):
def test_etcd_op(self):
cli = etcd_client.EtcdClient('test_cluster', 'localhost:2379', 'data_source_a')
cli.delete('fl_key')
cli.set_data('fl_key', 'fl_value')
self.assertEqual(cli.get_data('fl_key'), b'fl_value')
self.assertFalse(cli.cas('fl_key', 'fl_value1', 'fl_value2'))
self.assertTrue(cli.cas('fl_key', 'fl_value', 'fl_value1'))
self.assertEqual(cli.get_data('fl_key'), b'fl_value1')
goahead = False
def thread_routine():
cli.set_data('fl_key', 'fl_value2')
self.assertEqual(cli.get_data('fl_key'), b'fl_value2')
eiter, cancel = cli.watch_key('fl_key')
other = threading.Thread(target=thread_routine)
other.start()
for e in eiter:
self.assertEqual(e.key, b'/data_source_a/fl_key')
self.assertEqual(e.value, b'fl_value2')
cancel()
other.join()
cli.destory_client_pool()
if __name__ == '__main__':
unittest.main()
|
import utime
import math
from array import array
from machine import Pin, PWM, Timer
from sound import Sound
def read_words(filename):
buffer = bytearray(128)
with open(filename, 'rb', buffering=0) as file:
while True:
n = file.readinto(buffer)
if n == 0:
break
i = 0
while i + 1 < n:
yield (buffer[i] << 8) | buffer[i + 1]
i += 2
class MusicPlayer:
LED_PINS = [16, 17, 18, 15, 19, 20, 21, 22]
def __init__(self):
self.sound = Sound()
self._init_frequency_table()
self.atten = [15] * 8
self.target = [15] * 8
self.decay_mask = [3] * 8
self.decay_clock = 0
self._init_leds()
self.timer = Timer()
def play_song(self, filename):
try:
self.start_playing()
cmd_time = utime.ticks_ms()
for word in read_words(filename):
cmd_time = self.play_word(word, cmd_time)
utime.sleep_ms(1000)
finally:
self.finish_playing()
def play_words(self, words, cmd_time):
try:
for word in words:
cmd_time = self.play_word(word, cmd_time)
return cmd_time
except KeyboardInterrupt:
self.finish_playing()
raise
def play_word(self, word, cmd_time):
cmd = (word >> 14) & 0x3
if cmd == 0:
# note on: V = voice; A = attenuation; N = note
# 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
# 0 0 V2 V1 V0 A3 A2 A1 A0 N6 N5 N4 N3 N2 N1 N0
note = word & 0x7F
attenuation = (word & 0x780) >> 7
voice = (word & 0x3800) >> 11
self._note_on(voice, note, attenuation)
elif cmd == 1:
# noise on: V = voice; A = attenuation; S = sustain; N = noise type
# 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
# 0 1 0 0 0 V0 S2 S1 S0 A3 A2 A1 A0 N2 N1 N0
noise = (word & 0b111)
atten = (word & 0b1111000) >> 3
sustain = (word & 0b1110000000) >> 7
voice = (word & 0b10000000000) >> 10
voice = 3 + (voice * 4)
self._noise_on(voice, noise, sustain, atten)
elif cmd == 2:
# delay: D = delay in milliseconds
# 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
# 1 0 DD DC DB DA D9 D8 D7 D6 D5 D4 D3 D2 D1 D0
ms = word & 0x3FFF
# TODO figure out why utime.sleep_ms() sometimes failed to wake up
# and then be a bit nicer to the Pico by avoiding this busy wait
cmd_time = utime.ticks_add(cmd_time, ms)
while utime.ticks_diff(cmd_time, utime.ticks_ms()) > 0:
pass
else:
# notes off: C = channel; V = voice mask
# 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
# 1 1 0 0 0 0 0 0 V7 V6 V5 V4 V3 V2 V1 V0
mask = word & 0xFF
self._notes_off(mask)
return cmd_time
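    # Worked example (illustrative, not from the original source): decoding
    # the "note on" word 0x1A45.
    #   0x1A45 = 0b0001101001000101
    #   cmd   = bits 15-14 = 00       -> note on
    #   voice = bits 13-11 = 011      -> voice 3
    #   atten = bits 10-7  = 0100     -> attenuation 4
    #   note  = bits 6-0   = 1000101  -> MIDI note 69 (A4, 440 Hz)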
def start_playing(self):
self.timer.init(freq=80, mode=Timer.PERIODIC, callback=self._process_envelopes)
def finish_playing(self):
self.timer.deinit()
self._lights_off()
self.sound.silence()
def _init_frequency_table(self):
self.frequency_table = array('H') # unsigned short
n = Sound.CLOCK_FREQ / (32 * 440)
for midi_note in range(128):
f = n / math.pow(2, (midi_note - 69.0) / 12)
while f > 1023:
f /= 2 # shift notes that won't fit into the frequency register up an octave until they do
self.frequency_table.append(round(f))
def _init_leds(self):
self.pwms = []
for pin in MusicPlayer.LED_PINS:
            if pin is None:
self.pwms.append(None)
else:
pwm = PWM(Pin(pin))
pwm.freq(120)
pwm.duty_u16(0)
self.pwms.append(pwm)
def _lights_off(self):
for pwm in self.pwms:
if pwm:
pwm.duty_u16(0)
def _set_led_intensity(self, voice, atten):
if self.pwms[voice]:
duty = 0xfff0 >> atten
self.pwms[voice].duty_u16(duty)
def _note_on(self, voice, note, attenuation):
self.atten[voice] = attenuation
self.target[voice] = min(attenuation + 3, 15)
self.sound.set_frequency(voice, self.frequency_table[note])
self.sound.set_attenuation(voice, attenuation)
self._set_led_intensity(voice, attenuation)
def _noise_on(self, voice, noise, sustain, attenuation):
self.atten[voice] = attenuation
self.target[voice] = 15
self.decay_mask[voice] = sustain
self.sound.set_noise(voice, noise)
self.sound.set_attenuation(voice, attenuation)
self._set_led_intensity(voice, attenuation)
def _notes_off(self, mask):
for voice in range(8):
if 0 != (mask & (1 << voice)):
self.target[voice] = 15
def _process_envelopes(self, _timer):
self.decay_clock = (self.decay_clock + 1) & 7
for voice in range(8):
if (self.decay_mask[voice] & self.decay_clock) == 0:
if self.atten[voice] < self.target[voice]:
self.atten[voice] += 1
self.sound.set_attenuation(voice, self.atten[voice])
self._set_led_intensity(voice, self.atten[voice])
|
from __future__ import division
import os
import datetime
import pandas as pd
import numpy as np
from configparser import ConfigParser, NoOptionError, NoSectionError
import glob
import cv2
from pylab import cm
from simba.rw_dfs import *
from simba.drop_bp_cords import *
def ROI_directionality_other_animals_visualize(inifile):
config = ConfigParser()
config.read(inifile)
noAnimals = config.getint('General settings', 'animal_no')
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
projectPath = config.get('General settings', 'project_path')
csv_dir_in = os.path.join(projectPath, 'csv', 'directionality_dataframes')
# frames_dir_out = os.path.join(projectPath, 'frames', 'output', 'Directionality_between_animals')
# if not os.path.exists(frames_dir_out): os.makedirs(frames_dir_out)
vidInfPath = os.path.join(projectPath, 'logs', 'video_info.csv')
vidinfDf = pd.read_csv(vidInfPath)
animalIDlist = config.get('Multi animal IDs', 'id_list')
if not animalIDlist:
animalIDlist = []
for animal in range(noAnimals):
animalIDlist.append('Animal_' + str(animal + 1))
multiAnimalStatus = False
print('Applying settings for classical tracking...')
else:
animalIDlist = animalIDlist.split(",")
multiAnimalStatus = True
print('Applying settings for multi-animal tracking...')
def line_length(p, q, n, M, coord):
Px = np.abs(p[0] - M[0])
Py = np.abs(p[1] - M[1])
Qx = np.abs(q[0] - M[0])
Qy = np.abs(q[1] - M[1])
Nx = np.abs(n[0] - M[0])
Ny = np.abs(n[1] - M[1])
Ph = np.sqrt(Px*Px + Py*Py)
Qh = np.sqrt(Qx*Qx + Qy*Qy)
Nh = np.sqrt(Nx*Nx + Ny*Ny)
if (Nh < Ph and Nh < Qh and Qh < Ph):
coord.extend((q[0], q[1]))
return True, coord
elif (Nh < Ph and Nh < Qh and Ph < Qh):
coord.extend((p[0], p[1]))
return True, coord
else:
return False, coord
filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
if not filesFound:
print('No directionality calculations found. Please run the calculations before running the visualization creation.')
videoCounter = 1
x_cols, y_cols, p_cols = getBpNames(inifile)
animalBpDict = create_body_part_dictionary(multiAnimalStatus, animalIDlist, noAnimals, x_cols, y_cols, p_cols, [])
for filePath in filesFound:
fileBaseName = os.path.basename(filePath)
filename, fileType = os.path.splitext(fileBaseName)[0], os.path.splitext(fileBaseName)[1]
print('Analyzing ROI features for ' + filename + '...')
currVideoSettings = vidinfDf.loc[vidinfDf['Video'] == filename]
fps = float(currVideoSettings['fps'])
currDf = read_df(filePath, wfileType)
currDf = currDf.fillna(0)
currDf = currDf.apply(pd.to_numeric)
currDf = currDf.reset_index(drop=True)
currDf = currDf.loc[:, ~currDf.columns.str.contains('^Unnamed')]
directionalityDict = checkDirectionalityCords(animalBpDict)
facingDfcols, directionColheaders, directionColEyeXHeads, directionColEyeYHeads, directionColBpXHeads, directionColBpYHeads = [],[],[],[],[],[]
listofListColHeaders = []
listofListEyeXHeaders, listofListEyeYHeaders, listofListBpXHeaders, listofListBpYHeaders = [], [], [],[]
####### CREATE DESTINATION DATAFRAME #############
for animal in directionalityDict.keys():
otherAnimals = animalIDlist.copy()
otherAnimals.remove(animal)
for otherAnimal in otherAnimals:
otherAnimalDictX = animalBpDict[otherAnimal]['X_bps']
currColHeaders, currXEyeHeaders, currYEyeHeaders, currBpXHeaders, currBpYHeaders = [], [], [], [], []
for otherAnimalBp in otherAnimalDictX:
currBp = otherAnimal + '_' + otherAnimalBp
currBp = currBp.replace('_x', '')
directionColheaders.append(str(animal) + '_directing_' + currBp)
currColHeaders.append(directionColheaders[-1])
directionColEyeXHeads.append(str(animal) + '_directing_' + currBp + '_eye_x')
currXEyeHeaders.append(directionColEyeXHeads[-1])
directionColEyeYHeads.append(str(animal) + '_directing_' + currBp + '_eye_y')
currYEyeHeaders.append(directionColEyeYHeads[-1])
directionColBpXHeads.append(str(animal) + '_directing_' + currBp + '_bp_x')
currBpXHeaders.append(directionColBpXHeads[-1])
directionColBpYHeads.append(str(animal) + '_directing_' + currBp + '_bp_y')
currBpYHeaders.append(directionColBpYHeads[-1])
listofListColHeaders.append(currColHeaders)
listofListEyeXHeaders.append(currXEyeHeaders)
listofListEyeYHeaders.append(currYEyeHeaders)
listofListBpXHeaders.append(currBpXHeaders)
listofListBpYHeaders.append(currBpYHeaders)
outputFolderName = os.path.join(projectPath, 'frames', 'output', 'ROI_directionality_visualize')
if not os.path.exists(outputFolderName):
os.makedirs(outputFolderName)
currVideoPath = os.path.join(projectPath, 'videos', filename + '.mp4')
outputfilename = os.path.join(outputFolderName, filename + '.avi')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
cap = cv2.VideoCapture(currVideoPath)
vid_input_width, vid_input_height = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
writer = cv2.VideoWriter(outputfilename, fourcc, int(fps), (vid_input_width, vid_input_height))
mySpaceScaleY, mySpaceScaleX, myRadius, myResolution, myFontScale = 40, 800, 20, 1500, 1
maxResDimension = max(vid_input_width, vid_input_height)
DrawScale = int(myRadius / (myResolution / maxResDimension))
colorList = []
cmaps = ['spring', 'summer', 'autumn', 'cool', 'Wistia', 'Pastel1', 'Set1', 'winter', 'gnuplot', 'gnuplot2', 'cubehelix', 'brg', 'jet', 'terrain', 'ocean', 'rainbow', 'gist_earth', 'gist_stern', 'gist_ncar', 'Spectral', 'coolwarm']
cMapSize = int(len(x_cols) * noAnimals) + 1
colorListofList = []
for colormap in range(len(cmaps)):
currColorMap = cm.get_cmap(cmaps[colormap], cMapSize)
currColorList = []
for i in range(currColorMap.N):
rgb = list((currColorMap(i)[:3]))
rgb = [i * 255 for i in rgb]
rgb.reverse()
currColorList.append(rgb)
colorListofList.append(currColorList)
currRow = 0
while (cap.isOpened()):
ret, img = cap.read()
if ret == True:
overlay = img.copy()
for currentGaze in range(len(listofListColHeaders)):
directingAnimalList = listofListColHeaders[currentGaze]
for directed2bp in range(len(directingAnimalList)):
directing2bodyPart = directingAnimalList[directed2bp]
lookedStatus = int(currDf.loc[currRow, directing2bodyPart])
color = colorListofList[currentGaze][directed2bp]
if lookedStatus == 1:
eye_x_col = listofListEyeXHeaders[currentGaze][directed2bp]
eye_y_col = listofListEyeYHeaders[currentGaze][directed2bp]
eye_x_cord = currDf.loc[currRow, eye_x_col]
eye_y_cord = currDf.loc[currRow, eye_y_col]
bp_x_col = listofListBpXHeaders[currentGaze][directed2bp]
bp_y_col = listofListBpYHeaders[currentGaze][directed2bp]
bp_x_cord = currDf.loc[currRow, bp_x_col]
bp_y_cord = currDf.loc[currRow, bp_y_col]
if (bp_x_cord != 0 and bp_y_cord != 0):
if (eye_x_cord != 0 and eye_y_cord != 0):
cv2.line(overlay, (int(eye_x_cord), int(eye_y_cord)), (int(bp_x_cord), int(bp_y_cord)), color, 4)
overlay = np.uint8(overlay)
image_new = cv2.addWeighted(overlay, 0.6, img, 1 - 0.4, 0)
writer.write(image_new)
print('Image ' + str(currRow + 1) + ' / ' + str(len(currDf)))
if img is None:
print('Video ' + str(outputfilename) + ' saved in project_folder/frames/output/ROI_directionality_visualize')
cap.release()
currRow += 1
|
#camera.py
import cv2
class VideoCamera(object):
def __init__(self):
self.video = cv2.VideoCapture(0)
def __del__(self):
self.video.release()
def get_frame(self):
success, image = self.video.read()
ret, jpeg = cv2.imencode('.jpg', image)
return jpeg.tobytes()
|
from __future__ import division, print_function, absolute_import
from nnmnkwii import paramgen as G
import numpy as np
def _get_windows_set():
windows_set = [
# Static
[
(0, 0, np.array([1.0])),
],
# Static + delta
[
(0, 0, np.array([1.0])),
(1, 1, np.array([-0.5, 0.0, 0.5])),
],
# Static + delta + deltadelta
[
(0, 0, np.array([1.0])),
(1, 1, np.array([-0.5, 0.0, 0.5])),
(1, 1, np.array([1.0, -2.0, 1.0])),
],
]
return windows_set
def test_mlpg():
static_dim = 2
T = 10
windows_set = _get_windows_set()
for windows in windows_set:
means = np.random.rand(T, static_dim * len(windows))
variances = np.tile(np.random.rand(static_dim * len(windows)), (T, 1))
generated = G.mlpg(means, variances, windows)
assert generated.shape == (T, static_dim)
# Test variances correctly expanded
for windows in windows_set:
for dtype in [np.float32, np.float64]:
means = np.random.rand(T, static_dim * len(windows)).astype(dtype)
variances = np.random.rand(static_dim * len(windows)).astype(dtype)
variances_frames = np.tile(variances, (T, 1))
# Explicitly give variances over frame
generated1 = G.mlpg(means, variances_frames, windows)
# Give global variances. This will get expanded over frames
# internally
generated2 = G.mlpg(means, variances, windows)
assert generated1.dtype == dtype
assert np.allclose(generated1, generated2)
def test_mlpg_window_full():
static_dim = 2
T = 10
def full_window_mat_native(win_mats, T):
        concatenated_window = np.zeros((T * len(windows), T))
        for win_index, win_mat in enumerate(win_mats):
            win = win_mat.full()
            b = win_index * T
            concatenated_window[b:b + T, :] = win
        return concatenated_window
for windows in _get_windows_set():
win_mats = G.build_win_mats(windows, T)
fullwin = G.full_window_mat(win_mats, T)
assert fullwin.shape == (T * len(windows), T)
assert np.allclose(full_window_mat_native(win_mats, T), fullwin)
def test_unit_variance_mlpg():
static_dim = 2
T = 10
for windows in _get_windows_set():
means = np.random.rand(T, static_dim * len(windows))
variances = np.ones(static_dim * len(windows))
y = G.mlpg(means, variances, windows)
R = G.unit_variance_mlpg_matrix(windows, T)
y_hat = R.dot(G.reshape_means(means, static_dim))
assert np.allclose(y_hat, y)
def test_reshape_means():
static_dim = 2
T = 10
for windows in _get_windows_set():
means = np.random.rand(T, static_dim * len(windows))
reshaped_means = G.reshape_means(means, static_dim)
assert reshaped_means.shape == (T * len(windows), static_dim)
reshaped_means2 = G.reshape_means(reshaped_means, static_dim)
# Test if call on reshaped means doesn't change anything
assert np.allclose(reshaped_means, reshaped_means2)
|
import asyncio
import blinker
from .reify import reify
from .periodic import Periodic
from .timers import Timers
import logging
logger = logging.getLogger(__name__)
class Deck:
key_spacing = (36, 36)
def __init__(self, deck, keys=None, clear=True, loop=None, **kw):
self._loop = loop or asyncio.get_event_loop()
self._deck = deck
self._brightness = .4
self._clear = clear
self.key_up = blinker.signal('key_up')
self.key_down = blinker.signal('key_down')
self.page_in = blinker.signal('page_in') # called when putting page in foreground
self.page_out = blinker.signal('page_out') # called when about to be put in background
self._pages = {}
self._page_history = [] # track page navigation on a stack
self._deck.set_key_callback(self.cb_keypress)
self._timers = Timers(self, loop, **kw)
self._futures = []
self._check_futures = Periodic(self._loop, 3, self.cb_check_futures)
self._check_futures.start()
self._quit_future = asyncio.Future(loop=loop)
self._deck.reset()
@reify
def serial_number(self):
return self._deck.get_serial_number()
def __str__(self):
return self.serial_number
async def run(self):
"""
        await this method to keep the program running "forever"
"""
logger.debug("waiting for quit signal...")
await self._quit_future
async def release(self, *args):
"""
        call at least once when exiting
        this is sometimes called twice, depending on whether exit came from
        ctrl-c or a more graceful path; hence _deck is set to None
"""
if self._deck is None:
return
with self._deck:
if self._clear:
self.turn_off()
self._deck.reset()
self._deck.close()
await self._check_futures.stop()
self._deck = None
def __enter__(self):
"""
get lock on self._deck
"""
self._deck.update_lock.acquire()
def __exit__(self, type, value, traceback):
"""
release lock on self._deck
"""
self._deck.update_lock.release()
@property
def brightness(self):
return self._brightness
@brightness.setter
def brightness(self, value):
with self._deck:
self._brightness = value
self._deck.set_brightness(value)
def turn_on(self):
# note that self._brightness is not changed
with self._deck:
self._deck.set_brightness(self._brightness)
def turn_off(self):
# note that self._brightness is not changed
with self._deck:
self._deck.set_brightness(0)
@property
def page(self):
"""
active page
"""
# first run
if not self._page_history:
return None
curr_page = self._page_history[-1]
return self._pages[curr_page]
def add_page(self, name, page):
logger.debug("adding page: %s: %s", name, page)
self._pages[name] = page
def change_page(self, name):
logger.debug("change to page: %s", name)
self.page_out.send_async(self.page)
self._page_history.append(name)
self.page_in.send_async(self.page)
return self.page
def prev_page(self):
"""
go to previous page, pop item off page history
"""
if len(self._page_history) <= 1:
return None
self.page_out.send_async(self.page)
self._page_history.pop()
logger.debug("goto prev page: %s", self._page_history[-1])
self.page_in.send_async(self.page)
return self.page
async def cb_keypress_async(self, device, key, pressed):
# NOTE now we're in the main thread
key = self.page.keys[key]
# logger.debug(f"cb_keypress_async: {key} {pressed}")
if pressed:
return self.key_down.send_async(key)
else:
return self.key_up.send_async(key)
def cb_keypress(self, device, key, state):
# NOTE we're in the streamdeck worker thread, not main
fut = asyncio.run_coroutine_threadsafe(
self.cb_keypress_async(device, key, state),
self._loop
)
self._futures.append(fut)
async def cb_check_futures(self):
"""
check every few seconds that the futures scheduled from the
        streamdeck worker thread haven't thrown an exception.
        This isn't "required", but any problems in a key callback
        (basically anything we're trying to accomplish) would otherwise
        disappear into the void and make debugging virtually impossible,
        so log a stacktrace.
"""
# logger.debug("cb_check_futures: %s", self._futures)
remove = []
for fut in self._futures:
if not fut.done():
continue
try:
results = fut.result() # list of connected listeners for the signal
# not totally confident I know what's going on here...
# I think blink-async:send_async() returns the receiver
# callback and the results of the callback, which in our case
# is the nested coroutine. I think... this seems to work though.
for receiver_cb, task in results:
await task # raises exception if applicable
except asyncio.CancelledError:
pass
except Exception as e:
logger.exception(e)
finally:
remove.append(fut)
for fut in remove:
self._futures.remove(fut)
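

# --- Hedged usage sketch (not part of the original module) ---
# `hardware_deck` and `SomePage` are hypothetical stand-ins for the real
# StreamDeck device handle and page class used elsewhere in this package.
#
#   deck = Deck(hardware_deck)
#   deck.add_page('home', SomePage(deck))
#   deck.add_page('settings', SomePage(deck))
#   deck.change_page('home')      # pushes 'home' onto the page history stack
#   deck.change_page('settings')  # 'settings' becomes the active page
#   deck.prev_page()              # pops back to 'home'
#   asyncio.get_event_loop().run_until_complete(deck.run())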
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^location/(\d+)', views.location, name='location'),
    url(r'^search/', views.search, name='search'),
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import argparse
from pathlib import Path
from metadata import SEQUENCES
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--root", type=Path, required=True)
parser.add_argument("--skeleton", type=str, default="BODY_135")
args = parser.parse_args()
root = args.root
skeleton = args.skeleton
executable = f"python {skeleton}.py"
for sequence in SEQUENCES:
poses_2d_dir = root / sequence / "Poses" / "2D" / skeleton
calibration = root / sequence / "cameras.json"
poses_3d = root / sequence / "Poses" / "3D" / skeleton / "Reconstructed.tar.xz"
cmd = f"{executable} --poses_2d_dir={poses_2d_dir} --calibration={calibration} --poses_3d={poses_3d}"
print(cmd)
if __name__ == '__main__':
main()
|
from requests import get
import zipfile
from tkinter import messagebox
def update(interface):
    data = get(interface.data["version_url"])
    # Write the downloaded archive and extract it from the same location.
    zip_path = "./{0}/siva_update.zip".format(interface.data["directory_name"])
    with open(zip_path, "wb") as out:
        out.write(data.content)
    with zipfile.ZipFile(zip_path, "r") as zip_file:
        zip_file.extractall("./")
messagebox.showinfo("Updater", "Download complete. Please delete the old version and use the new one.")
|
import networkx as nx
import numpy as np
word_delimiters = ['!', '"', '#', '%', '&', '(', ')', '*', '+',
',', '-', '.', '...', '......', '/', ':', ';',
'<', '=', '>', '?', '@', '[', ']', '^', '_',
'`', '{', '|', '}', '~', ',', '。', '。。。',
'。。。。。。', '!', ';', '?', ':', '、', '(', ')',
'\t', '\n', '”', '’' , '“', '‘', "'"]
sentence_delimiters = ['?', '?', ';', ';', '!', '!', '。', '……', '…', '\n']
allow_pos = ['an', 'nz', 'vn', 'v', 'vd', 'x', 'n', 'nt', 'd']
def get_stop_words(path_stop_words):
"""
    Parameters:
    ----------
    path_stop_words: path to the stop-word file, with one stop word per line
    Returns:
    -------
    stop_words: set, the set of stop words
"""
stop_words = set()
if path_stop_words:
with open(path_stop_words, 'r') as fr:
stop_words = set([word.strip().replace('\n', '') for word in fr.readlines()])
return stop_words
def get_similarity(words1, words2):
"""
    Compute the similarity between two sentences.
    Formula: similarity = |A ∩ B| / (log(|A|) + log(|B|))
    Parameters:
    ----------
    words1: list, a list of words
    words2: list, a list of words
    Returns:
    -------
    sim_value: float, similarity between the two sentences
    """
    # Note: a sentence containing only a single word is treated as incomparable
    # here (and log(1) = 0 would make the denominator degenerate when both
    # sentences contain a single word).
if len(words1) <= 1 or len(words2) <= 1:
sim_value = 0
else:
sim_value = float(len(set(words1).intersection(set(words2))) / (np.log(len(words1)) + np.log(len(words2))))
return sim_value
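# Worked example of the formula above (illustrative):
#   words1 = ['machine', 'learning', 'rocks'], words2 = ['machine', 'learning']
#   |A ∩ B| = 2, so similarity = 2 / (log(3) + log(2)) ≈ 1.12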
def sort_sentences(sentences, words,
sim_func=get_similarity,
pagerank_config=None,
pr_error_handle='both'):
f"""
基于TextRank方法对句子以及分词结果进行提取摘要
Parameters:
----------
sentences: list, 切分后的结果句子列表
words: list, 切分后的结果词列表
pagerank_config: dict, networkx.pagerank的参数字典
pr_error_handle: str, pagerank不收敛的时候的处理策略,
iterator:增加迭代轮次(兜底),
tolerance:增加迭代轮次前后的差值阈值
both:增加迭代轮次的同时增加迭代轮次前后的差值阈值
Returns:
-------
list_res: list, 结果列表
"""
list_res = []
    # Default parameters for PageRank convergence
pr_alpha = 1
pr_max_iter = 200
pr_tol = 1e-6
if pagerank_config is None:
pagerank_config = {'alpha': pr_alpha,
'max_iter': pr_max_iter,
'tol': pr_tol}
len_sentences = len(sentences)
    # Initialise the undirected weighted graph between sentences as an N*N matrix
graph = np.zeros((len_sentences, len_sentences))
    # Compute the weights from the word-level similarity; since the graph is undirected, a(ij) = a(ji)
for i in range(len_sentences):
for j in range(len_sentences):
sim_value = sim_func(words[i], words[j])
graph[i, j] = sim_value
graph[j, i] = sim_value
    # Build the undirected weighted graph
nx_graph = nx.from_numpy_matrix(graph)
    # Compute the PR values. Note: with the initial parameters the computation may not converge; in that case the loop below retries with adjusted parameters
flag = True
while flag:
# noinspection PyBroadException
try:
            ## Start computing the PR values; this may fail to converge
pr_values = nx.pagerank(nx_graph, **pagerank_config)
            ## Stop the loop once convergence succeeds
flag = False
except Exception:
            ## If PR did not converge: strategy is to raise the convergence tolerance
if pr_error_handle == 'tolerance':
pr_tol *= 10
            ## Strategy: raise the number of iterations
elif pr_error_handle == 'iterator':
pr_max_iter += 100
            ## Do both at the same time
else:
pr_tol *= 10
pr_max_iter += 100
pagerank_config = {'alpha': pr_alpha,
'max_iter': pr_max_iter,
'tol': pr_tol}
    # pr_values: a dict of the form {index: pr, index: pr}
for idx, val in sorted(pr_values.items(), key=lambda x: x[1], reverse=True):
list_res.append({'sentence': sentences[idx],
'weight': val,
'index':idx})
return list_res
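# --- Hedged usage sketch (not part of the original module) ---
# Sentence splitting and word segmentation are assumed to happen upstream
# (e.g. with jieba for Chinese text); toy pre-tokenised data is shown here.
#
#   sentences = ['TextRank extracts key sentences.', 'It builds a sentence graph.']
#   words = [['textrank', 'extracts', 'key', 'sentences'],
#            ['builds', 'sentence', 'graph']]
#   summary = sort_sentences(sentences, words)
#   # summary[0]['sentence'] is the highest-weighted sentence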
|
from helper import unittest, PillowTestCase, hopper
# Not running this test by default. No DOS against Travis CI.
from PIL import PyAccess
import time
def iterate_get(size, access):
(w, h) = size
for x in range(w):
for y in range(h):
access[(x, y)]
def iterate_set(size, access):
(w, h) = size
for x in range(w):
for y in range(h):
access[(x, y)] = (x % 256, y % 256, 0)
def timer(func, label, *args):
iterations = 5000
starttime = time.time()
for x in range(iterations):
func(*args)
if time.time()-starttime > 10:
print("%s: breaking at %s iterations, %.6f per iteration" % (
label, x+1, (time.time()-starttime)/(x+1.0)))
break
if x == iterations-1:
endtime = time.time()
print("%s: %.4f s %.6f per iteration" % (
label, endtime-starttime, (endtime-starttime)/(x+1.0)))
class BenchCffiAccess(PillowTestCase):
def test_direct(self):
im = hopper()
im.load()
# im = Image.new( "RGB", (2000, 2000), (1, 3, 2))
caccess = im.im.pixel_access(False)
access = PyAccess.new(im, False)
self.assertEqual(caccess[(0, 0)], access[(0, 0)])
        print("Size: %sx%s" % im.size)
timer(iterate_get, 'PyAccess - get', im.size, access)
timer(iterate_set, 'PyAccess - set', im.size, access)
timer(iterate_get, 'C-api - get', im.size, caccess)
timer(iterate_set, 'C-api - set', im.size, caccess)
if __name__ == '__main__':
unittest.main()
# End of file
|
import json
import os
import re
import sys
from jsonschema import validate, exceptions
from icon_validator.rules.validator import KomandPluginValidator
from icon_validator.exceptions import ValidationException
class OutputValidator(KomandPluginValidator):
def __init__(self):
super().__init__()
self.missing_outputs = []
def validate_output(self, process_output, spec_schema, process_name, process_type):
try:
validate(process_output, spec_schema)
except(exceptions.ValidationError, exceptions.SchemaError) as e:
self.missing_outputs.append((f'{process_type}:{process_name}', e))
@staticmethod
def get_schemas(spec):
schemas = {}
sys.path.append(spec.directory)
for path, _, files in os.walk(spec.directory):
for file in files:
if "schema.py" in file and os.path.basename(path) != "connection":
full_path = os.path.join(path, file)
schemas[os.path.basename(path)] = OutputValidator.read_schema(full_path)
return schemas
@staticmethod
def read_schema(path):
with open(path) as schema:
text = schema.read()
text = text.strip()
output_pattern = '(?s)"""(.*?)"""'
results = re.findall(output_pattern, text)
if len(results) == 2:
json_ = json.loads(results[1]) # Action: 0 for input, 1 for output
else:
json_ = json.loads(results[2]) # Task: 0 for input, 1 for state, 2 for output
return json_
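    # Illustrative sketch (hypothetical content; real generated schema.py files
    # may differ) of the layout the regex above assumes: two or three
    # triple-quoted JSON blobs appear in order input / (state) / output, which
    # is why index 1 is used for actions and index 2 for tasks, e.g.:
    #
    #   class SomeActionOutput(komand.Output):
    #       schema = json.loads("""
    #       {"type": "object", "properties": {"result": {"type": "string"}}}
    #       """)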
def validate(self, spec):
schemas = OutputValidator.get_schemas(spec)
actions, tasks = {}, {}
        # Guard against plugins that define no actions or tasks
if spec.actions():
actions = spec.actions()
if spec.tasks():
tasks = spec.tasks()
if not actions and not tasks:
return
for action in actions:
path = os.path.join(spec.directory, f".output/action_{action}.json")
if os.path.exists(path):
with open(path, 'r') as output: # if test output has been generated
self.validate_output(json.load(output), schemas[action], action, "Action")
for task in tasks:
path = os.path.join(spec.directory, f".output/task_{task}.json")
if os.path.exists(path):
with open(path, 'r') as output: # if test output has been generated
self.validate_output(json.load(output), schemas[task], task, "Task")
if len(self.missing_outputs) > 0:
raise ValidationException(f"Action/Task output does not match spec. List of (TYPE:NAME, ERROR): {self.missing_outputs}")
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.to_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToTensorOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testDocStringExamples(self):
"""Example from ragged_to_tensor.__doc__."""
rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]])
dt = rt.to_tensor()
self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]])
@parameterized.parameters(
{
'rt_input': [],
'ragged_rank': 1,
'expected': [],
'expected_shape': [0, 0],
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]]
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'default': 9,
'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]]
},
{
'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]],
'ragged_rank':
1,
'default': [9],
'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]],
[[5], [6], [9]]]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'expected': [
[[1, 2], [0, 0], [3, 4]], #
[[0, 0], [0, 0], [0, 0]], #
[[5, 0], [0, 0], [0, 0]], #
[[6, 7], [8, 0], [0, 0]], #
]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'default':
9,
'expected': [
[[1, 2], [9, 9], [3, 4]], #
[[9, 9], [9, 9], [9, 9]], #
[[5, 9], [9, 9], [9, 9]], #
[[6, 7], [8, 9], [9, 9]], #
]
},
{
'rt_input': [[[1], [2], [3]]],
'ragged_rank': 1,
'default': 0,
'expected': [[[1], [2], [3]]],
},
{
'rt_input': [[[[1], [2]], [], [[3]]]],
'default': 9,
'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]],
},
)
def testRaggedTensorToTensor(self,
rt_input,
expected,
ragged_rank=None,
default=None,
expected_shape=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
dt = rt.to_tensor(default)
self.assertIsInstance(dt, ops.Tensor)
self.assertEqual(rt.dtype, dt.dtype)
self.assertTrue(dt.shape.is_compatible_with(rt.shape))
if expected_shape is not None:
expected = np.ndarray(expected_shape, buffer=np.array(expected))
self.assertAllEqual(dt, expected)
@parameterized.parameters(
{
'rt_input': [[1, 2, 3]],
'default': [0],
'error': (ValueError, r'Shape \(1,\) must have rank at most 0'),
},
{
'rt_input': [[[1, 2], [3, 4]], [[5, 6]]],
'ragged_rank': 1,
'default': [7, 8, 9],
'error': (ValueError, r'Shapes \(3,\) and \(2,\) are incompatible'),
},
{
'rt_input': [[1, 2, 3]],
'default': 'a',
'error': (TypeError, '.*'),
},
)
def testError(self, rt_input, default, error, ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
with self.assertRaisesRegexp(error[0], error[1]):
rt.to_tensor(default)
# This covers the tests above, but with the new implementation.
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToTensorOpNewTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testDocStringExamples(self):
"""Example from ragged_to_tensor.__doc__."""
rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]])
dt = ragged_conversion_ops.ragged_to_dense(rt)
self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]])
@parameterized.parameters(
{
'rt_input': [],
'ragged_rank': 1,
'expected': [],
'expected_shape': [0, 0],
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]]
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'default': 9,
'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]]
},
{
'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]],
'ragged_rank':
1,
'default': [9],
'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]],
[[5], [6], [9]]]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'expected': [
[[1, 2], [0, 0], [3, 4]], #
[[0, 0], [0, 0], [0, 0]], #
[[5, 0], [0, 0], [0, 0]], #
[[6, 7], [8, 0], [0, 0]], #
]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'default':
9,
'expected': [
[[1, 2], [9, 9], [3, 4]], #
[[9, 9], [9, 9], [9, 9]], #
[[5, 9], [9, 9], [9, 9]], #
[[6, 7], [8, 9], [9, 9]], #
]
},
{
'rt_input': [[[1], [2], [3]]],
'ragged_rank': 1,
'default': 0,
'expected': [[[1], [2], [3]]],
},
{
'rt_input': [[[[1], [2]], [], [[3]]]],
'default': 9,
'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]],
},
)
def testRaggedTensorToTensor(self,
rt_input,
expected,
ragged_rank=None,
default=None,
expected_shape=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
dt = ragged_conversion_ops.ragged_to_dense(rt, default_value=default)
self.assertIsInstance(dt, ops.Tensor)
self.assertEqual(rt.dtype, dt.dtype)
self.assertTrue(dt.shape.is_compatible_with(rt.shape))
if expected_shape is not None:
expected = np.ndarray(expected_shape, buffer=np.array(expected))
self.assertAllEqual(dt, expected)
@parameterized.parameters(
{
'rt_input': [[1, 2, 3]],
'default': 'a',
'error': (TypeError, '.*'),
}, {
'rt_input': [[1, 2, 3]],
'default': 'b',
'error': (TypeError, '.*'),
})
def testError(self, rt_input, default, error, ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
with self.assertRaisesRegexp(error[0], error[1]):
ragged_conversion_ops.ragged_to_dense(rt, default_value=default)
@test_util.run_all_in_graph_and_eager_modes
class RaggedToTensorOpAdditionalTests(test_util.TensorFlowTestCase):
def _compare_to_reference(self,
ragged_tensor,
expected=None,
default_value=None):
treatment = ragged_conversion_ops.ragged_to_dense(
ragged_tensor, default_value=default_value)
control = ragged_tensor.to_tensor(default_value=default_value)
self.assertAllEqual(control, treatment)
if expected is not None:
self.assertAllEqual(expected, treatment)
def test_already_dense_simple(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant([6, 7, 8, 9, 10, 11], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data, [[6, 7, 8], [9, 10, 11]])
def test_already_dense_with_dense_values_and_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]],
dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]]],
default_value=constant_op.constant([31, 32], dtype=dtypes.int64))
def test_already_dense_with_dense_values(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]],
dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]]])
def test_ragged_with_dense_values_and_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [2, 3]]],
default_value=[2, 3])
def test_ragged_with_dense_values_and_small_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [2, 2]]],
default_value=2)
def test_already_dense_with_dense_values_string(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[b'a', b'b'], [b'c', b'd'], [b'e', b'f'], [b'g', b'jalapeno'],
[b'kangaroo', b'llama'], [b'manzana', b'nectar']],
dtype=dtypes.string),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data,
[[[b'a', b'b'], [b'c', b'd'], [b'e', b'f']],
[[b'g', b'jalapeno'], [b'kangaroo', b'llama'],
[b'manzana', b'nectar']]])
def test_already_dense_with_string(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
['a', 'b', 'c', 'd', 'e', 'antidisestablishmentarianism'],
dtype=dtypes.string),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[b'a', b'b', b'c'], [b'd', b'e', b'antidisestablishmentarianism']])
def test_already_dense(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [3, 4, 5]])
self._compare_to_reference(input_data, [[0, 1, 2], [3, 4, 5]])
def test_true_ragged(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
self._compare_to_reference(input_data, [[0, 1, 2], [0, 0, 0], [3, 0, 0]])
def test_true_ragged_default_3(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
self._compare_to_reference(
input_data, [[0, 1, 2], [3, 3, 3], [3, 3, 3]], default_value=3)
def test_three_dimensional_ragged(self):
input_data = ragged_factory_ops.constant([[[0, 1, 2], []], [], [[3]]])
self._compare_to_reference(
input_data, [[[0, 1, 2], [3, 3, 3]], [[3, 3, 3], [3, 3, 3]],
[[3, 3, 3], [3, 3, 3]]],
default_value=3)
def test_empty_tensor(self):
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant([], dtype=dtypes.int64),
value_rowids=constant_op.constant([], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data, [[], []], default_value=3)
def test_empty_last(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3], []])
self._compare_to_reference(input_data,
[[0, 1, 2], [0, 0, 0], [3, 0, 0], [0, 0, 0]])
def test_shape_limit(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[2, 3])
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_tuple(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=(2, 3))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_tensor_shape(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, 3]))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_half_limit_tensor_shape(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, None]))
self.assertAllEqual(actual, [[0, 1, 2, 3], [0, 0, 0, 0]])
def test_skip_eager_shape_half_limit_tensor_shape(self):
# Eager would produce a shape of [2, 4]
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, None]))
result = actual.shape.as_list()
# This is equal to [2, 4] in eager, or [2, None] in non-eager.
self.assertEqual(result[0], 2)
def test_shape_limit_shape_is_tensor_int64(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=constant_op.constant([2, 3], dtype=dtypes.int64))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_shape_is_tensor_int32(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=constant_op.constant([2, 3], dtype=dtypes.int32))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_expand_first_dim(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[4, 4])
self.assertAllEqual(
actual, [[0, 1, 2, 0], [0, 0, 0, 0], [3, 0, 0, 0], [0, 0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [4, 4])
def test_value_transposed(self):
# This test tries to get a tensor in columnar format, where I am uncertain
# as to whether the underlying op, which copies data in the raw format,
# could fail.
my_value = array_ops.transpose(
constant_op.constant([[0, 1, 2, 3], [4, 5, 6, 7]]))
input_data = RaggedTensor.from_value_rowids(
values=my_value,
value_rowids=constant_op.constant([0, 1, 2, 3], dtype=dtypes.int64),
nrows=constant_op.constant(4, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data,
[[[0, 4]], [[1, 5]], [[2, 6]], [[3, 7]]])
# This fails on the older version of to_tensor.
def test_broadcast_default(self):
    # Note: the functionality exercised here is not generally supported.
# The dense dimension here is 2 x 2
input_data = ragged_factory_ops.constant([[[[1, 2], [3, 4]]], []],
ragged_rank=1)
# This placeholder has a 2 x 1 dimension.
default_value = array_ops.placeholder_with_default([[5], [6]], shape=None)
actual = ragged_conversion_ops.ragged_to_dense(
input_data, default_value=default_value)
expected = [[[[1, 2], [3, 4]]], [[[5, 5], [6, 6]]]]
self.assertAllEqual(actual, expected)
# This fails on the older version of to_tensor.
def test_broadcast_default_no_placeholder(self):
# Again, this functionality is not supported. It fails more gracefully
# when creating the op.
input_data = ragged_factory_ops.constant([[[[1, 2], [3, 4]]], []],
ragged_rank=1)
# default_value has a 2 x 1 dimension.
default_value = constant_op.constant([[5], [6]], shape=None)
actual = ragged_conversion_ops.ragged_to_dense(
input_data, default_value=default_value)
expected = [[[[1, 2], [3, 4]]], [[[5, 5], [6, 6]]]]
self.assertAllEqual(actual, expected)
def test_shape_expand_second_dim(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3], []])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[3, 4])
self.assertAllEqual(actual, [[0, 1, 2, 0], [0, 0, 0, 0], [3, 0, 0, 0]])
def test_empty_tensor_with_shape(self):
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant([], dtype=dtypes.int64),
value_rowids=constant_op.constant([], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
actual = ragged_conversion_ops.ragged_to_dense(
input_data, default_value=3, shape=[2, 3])
self.assertAllEqual(actual, [[3, 3, 3], [3, 3, 3]])
if __name__ == '__main__':
googletest.main()
|
import archr
import unittest
from common import build_container
class TestAnalyzerQTrace(unittest.TestCase):
@classmethod
def setUpClass(cls):
build_container("cat")
def check_qtrace_results(self, target, **kwargs):
import qtrace
analyzer = archr.analyzers.QTraceAnalyzer(target)
machine = analyzer.fire(args_suffix=["/etc/passwd"], **kwargs)
syscalls = [
qtrace.syscalls["x86_64"][e[1]][1]
for e in machine.filtered_trace("syscall_start")
]
correct_syscalls = [
"sys_openat",
"sys_fstat",
"sys_fadvise64",
"sys_mmap",
"sys_read",
"sys_write",
"sys_read",
"sys_munmap",
"sys_close",
"sys_close",
"sys_close",
"sys_exit_group",
]
assert "\n".join(correct_syscalls) in "\n".join(syscalls)
def test_qtrace_local(self):
with archr.targets.LocalTarget(["/bin/cat"]).build().start() as target:
self.check_qtrace_results(target)
def test_qtrace_docker(self):
with archr.targets.DockerImageTarget(
"archr-test:cat"
).build().start() as target:
self.check_qtrace_results(target)
if __name__ == "__main__":
unittest.main()
|
""" This script contains the implementation of the MAML algorithms designed by
Chelsea Finn et al. (https://arxiv.org/pdf/1703.03400).
Implementation inspired from the following github repo:
https://github.com/facebookresearch/higher/blob/master/examples/maml-omniglot.py
Terminology:
------------
Support set : a set of training examples
(inputs+labels: iterable of (img, label) pairs)
Query set : a set of test examples
(inputs +labels : iterable of (img, label) pairs )
Task/Dataset : Support set + Query set.
Meta-train set: a set of datasets for meta-training
Meta-test set: a set of datasets for meta-evaluation
Meta-batch size: Number of tasks to consider for a meta-iteration
"""
import time
import copy
import logging
import datetime
import pickle
import numpy as np
import pandas as pd
import os
import gin
import higher
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.python import debug as tf_debug
from tensorflow.keras.models import clone_model
from tensorflow.keras.layers import Dense, Conv2D
from helper import conv_net
from metadl.api.api import MetaLearner, Learner, Predictor
from utils import create_grads_shell, reset_grads, app_custom_grads
@gin.configurable
class MyMetaLearner(MetaLearner):
"""
Replicates the fo-MAML implementation of the Model Agnostic Meta Learner
    designed by Chelsea Finn et al. (https://arxiv.org/pdf/1703.03400).
The meta-learner encapsulates the neural network weights during each
meta-iteration.
Terminology : a task is defined by the pair (Support set, Query set)
-----------
During meta-training :
The learner is trained on the support set for exactly one epoch.
The updated learner is then trained again but this time on the
        query set. The gradients of the associated loss are then computed
        w.r.t. the initial learner's parameters (2nd-order optimization,
        original MAML) or w.r.t. the updated parameters (1st-order
        approximation, fo-MAML). We perform the previous steps for a number
        of (learner, task) pairs and aggregate the gradients from each pair
to perform a meta-update of the initial learner's parameters
(that are the same at the beginning of the process).
During meta-testing :
The pre-trained (during meta-training) learner is fine-tuned with
the support set. Then we evaluate the algorithm's performance by
predicting labels of query examples, as in standard ML/DL problems.
"""
def __init__(self,
meta_iterations,
meta_batch_size,
support_batch_size,
query_batch_size,
img_size,
N_ways):
"""
Args:
meta_iterations : number of meta-iterations to perform, i.e. the
number of times the meta-learner's weights are updated.
meta_batch_size : The number of (learner, task) pairs that are used
to produce the meta-gradients, used to update the meta-learner's
weights after each meta-iteration.
support_batch_size : The batch size for the support set.
query_batch_size : The batch size for the query set.
            img_size : Integer, images are considered to be
                (img_size, img_size, 3).
            N_ways : Integer, the number of classes (ways) per classification
                task; it sizes the network's output layer.
"""
super().__init__()
self.meta_iterations = meta_iterations
self.meta_batch_size = meta_batch_size
self.support_batch_size = support_batch_size
self.query_batch_size = query_batch_size
self.img_size = img_size
self.N_ways = N_ways
self.meta_learner = conv_net(self.N_ways, img_size)
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
np.random.seed(1234)
self.device = torch.device('cuda:1')
self.meta_learner.to(device=self.device)
self.meta_opt = optim.Adam(self.meta_learner.parameters(), lr=1e-3)
def dataloader(self, dataset_episodic):
to_torch_labels = lambda a: torch.from_numpy(a.numpy()).long()
to_torch_imgs = lambda a: torch.from_numpy(
np.transpose(a.numpy(), (0, 3, 1, 2)))
def data_loader(n_batches):
for i, (e, _) in enumerate(dataset_episodic):
if i == n_batches:
break
logging.info('e shape: {}'.format(len(e)))
yield (to_torch_imgs(e[0]), to_torch_labels(e[1]),
to_torch_imgs(e[3]), to_torch_labels(e[4]))
datal = data_loader(n_batches=1)
for i, batch in enumerate(datal):
data_support, labels_support, data_query, labels_query = [
x.to(device=self.device) for x in batch]
logging.info(
'Supp imgs: {} | Supp labs : {} | Query imgs : {} | Query labs \n \n'.format(
data_support.shape, labels_support.shape, data_query.shape, labels_query.shape))
def process_task(self, batch):
"""
batch : [sup_imgs, sup_labs, sup_tidx, qry_imgs, qry_labs, qry_tidx]
sup_imgs : [batch_idx, nbr_imgs, H, W, C]
"""
to_torch_labels = lambda a: torch.from_numpy(a.numpy()).long()
to_torch_imgs = lambda a: torch.from_numpy(
np.transpose(a.numpy(), (0, 1, 4, 2, 3)))
return (to_torch_imgs(batch[0]), to_torch_labels(batch[1]),
to_torch_imgs(batch[3]), to_torch_labels(batch[4]))
def meta_fit(self, meta_dataset_generator):
""" Encapsulates the meta-learning procedure. In the fo-MAML case,
the meta-learner's weights update.
Args:
meta_dataset_generator : a DataGenerator object. We can access
the meta-train and meta-validation episodes via its attributes.
Refer to the metadl/data/dataset.py for more details.
Returns:
A Learner object initialized with the meta-learner's weights.
"""
# Load dataset in db
meta_train_dataset = meta_dataset_generator.meta_train_pipeline
meta_valid_dataset = meta_dataset_generator.meta_valid_pipeline
meta_train_dataset = meta_train_dataset.batch(32)
mtrain_iterator = meta_train_dataset.__iter__()
log = []
for epoch in range(self.meta_iterations):
if epoch % 20 == 0 :
tmp_learner = MyLearner(self.meta_learner)
self.train(mtrain_iterator, self.meta_learner, self.device,
self.meta_opt, epoch, log)
return MyLearner(self.meta_learner)
def train(self, db, net, device, meta_opt, epoch, log):
net.train()
#n_train_iter = db.x_train.shape[0] // db.batchsz
n_train_iter = 4
for batch_idx in range(n_train_iter):
start_time = time.time()
# Sample a batch of support and query images and labels.
batch = next(db)
batch = batch[0]
batch = self.process_task(batch)
x_spt, y_spt, x_qry, y_qry = [
x.to(device=self.device) for x in batch]
task_num, setsz, c_, h, w = x_spt.size()
logging.debug('Task num: {} | Setsz: {} | c_ : {} | h : {} | w : {}'.format(
task_num, setsz, c_, h, w))
querysz = x_qry.size(1)
logging.debug(
f'sup_x : {x_spt[0].shape} | sup_y : {y_spt[0].shape} | qry_x : {x_qry[0].shape} | qry_y : {y_qry[0].shape}')
# Initialize the inner optimizer to adapt the parameters to
# the support set.
            n_inner_iter = 2  # Increasing this significantly raises the GPU load
inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
qry_losses = []
qry_accs = []
meta_opt.zero_grad()
for i in range(task_num):
with higher.innerloop_ctx(
net, inner_opt, copy_initial_weights=False
) as (fnet, diffopt):
# Optimize the likelihood of the support set by taking
# gradient steps w.r.t. the model's parameters.
# This adapts the model's meta-parameters to the task.
# higher is able to automatically keep copies of
# your network's parameters as they are being updated.
for _ in range(n_inner_iter):
spt_logits = fnet(x_spt[i])
spt_loss = F.cross_entropy(spt_logits, y_spt[i])
diffopt.step(spt_loss)
# The final set of adapted parameters will induce some
# final loss and accuracy on the query dataset.
# These will be used to update the model's meta-parameters.
qry_logits = fnet(x_qry[i])
qry_loss = F.cross_entropy(qry_logits, y_qry[i])
qry_losses.append(qry_loss.detach())
qry_acc = (qry_logits.argmax(
dim=1) == y_qry[i]).sum().item() / querysz
qry_accs.append(qry_acc)
# Update the model's meta-parameters to optimize the query
# losses across all of the tasks sampled in this batch.
# This unrolls through the gradient steps.
qry_loss.backward()
meta_opt.step()
qry_losses = sum(qry_losses) / task_num
qry_accs = 100. * sum(qry_accs) / task_num
i = epoch + float(batch_idx) / n_train_iter
iter_time = time.time() - start_time
if batch_idx % 4 == 0:
logging.info(
f'[Epoch {i:.2f}] Train Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f} | Time: {iter_time:.2f}'
)
log.append({
'epoch': i,
'loss': qry_losses,
'acc': qry_accs,
'mode': 'train',
'time': time.time(),
})
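# --- Hedged illustration (not part of the original agent) -------------------
# Minimal sketch of the higher-based inner loop used in train() above, run on a
# toy linear model with random data. Shapes, step counts and names here are
# assumptions for illustration only.
def _innerloop_sketch():
    import torch.nn as nn
    toy_net = nn.Linear(4, 2)
    inner_opt = torch.optim.SGD(toy_net.parameters(), lr=1e-1)
    x, y = torch.randn(8, 4), torch.randint(0, 2, (8,))
    with higher.innerloop_ctx(toy_net, inner_opt,
                              copy_initial_weights=False) as (fnet, diffopt):
        for _ in range(2):  # inner adaptation steps on the "support" data
            diffopt.step(F.cross_entropy(fnet(x), y))
        # Query loss on the adapted weights; backward() propagates gradients
        # back to toy_net's initial parameters (the meta-parameters).
        F.cross_entropy(fnet(x), y).backward()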
@gin.configurable
class MyLearner(Learner):
""" In the case of fo-MAML, encapsulates a neural network and its training
methods.
"""
def __init__(self,
neural_net = None,
num_epochs=5,
lr=1e-1,
img_size=128):
"""
Args:
            neural_net : a torch.nn.Module object. A neural network model to
                copy as Learner.
num_epochs : Integer, the number of epochs to consider for the
training on support examples.
lr : Float, the learning rate associated to the learning procedure
(Adaptation).
img_size : Integer, images are considered to be
(img_size,img_size,3)
"""
super().__init__()
        if neural_net is None:
            self.learner = conv_net(5, img_size=img_size)
        else:
            self.learner = neural_net
self.optimizer = torch.optim.SGD(self.learner.parameters(), lr=lr) # inner opt
self.n_inner_iter = num_epochs
def __call__(self, imgs):
return self.learner(imgs)
def process_task(self, images, labels):
"""
batch : [sup_imgs, sup_labs, sup_tidx, qry_imgs, qry_labs, qry_tidx]
sup_imgs : [batch_idx, nbr_imgs, H, W, C]
"""
to_torch_labels = lambda a: torch.from_numpy(a.numpy()).long()
to_torch_imgs = lambda a: torch.from_numpy(
np.transpose(a.numpy(), (0, 3, 1, 2)))
return to_torch_imgs(images), to_torch_labels(labels)
def fit(self, dataset_train):
""" The learner's fit function over the train set of a task.
Args:
dataset_train : a tf.data.Dataset object. Iterates over the training
examples (support set).
Returns:
            predictor : An instance of MyPredictor that is initialized with
the fine-tuned learner's weights in this case.
"""
self.learner.train()
for images, labels in dataset_train:
images, labels = self.process_task(images, labels)
with higher.innerloop_ctx(self.learner, self.optimizer,
track_higher_grads=False) as (fnet, diffopt):
# Optimize the likelihood of the support set by taking
# gradient steps w.r.t. the model's parameters.
# This adapts the model's meta-parameters to the task.
for _ in range(self.n_inner_iter):
spt_logits = fnet(images)
spt_loss = F.cross_entropy(spt_logits, labels)
diffopt.step(spt_loss)
predictor = MyPredictor(fnet)
break
return predictor
    def load(self, model_dir):
        """Loads the learner's weights from a saved checkpoint.
        Args:
            model_dir: the directory name in which the participant's code and
                their saved/serialized learner are stored.
        """
        if not os.path.isdir(model_dir):
            raise ValueError('The model directory provided is invalid. Please'
                             + ' check that its path is valid.')
        ckpt_path = os.path.join(model_dir, 'learner.pt')
        self.learner.load_state_dict(torch.load(ckpt_path))
    def save(self, model_dir):
        """Saves the learner's weights to a checkpoint file.
        Args:
            model_dir: the directory name in which the learner's weights are
                saved/serialized.
        """
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir, exist_ok=True)
        ckpt_file = os.path.join(model_dir, 'learner.pt')
        torch.save(self.learner.state_dict(), ckpt_file)
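# --- Hedged illustration (not part of the original agent) -------------------
# Save/load round-trip sketch; 'checkpoint_dir' is a hypothetical path and the
# snippet assumes the saved learner used the default 5-way conv_net backbone.
def _save_and_reload_learner(learner, checkpoint_dir):
    learner.save(checkpoint_dir)   # writes <checkpoint_dir>/learner.pt
    restored = MyLearner()         # fresh default backbone (assumed to match)
    restored.load(checkpoint_dir)  # restores the saved state_dict
    return restored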
@gin.configurable
class MyPredictor(Predictor):
""" The predictor is meant to predict labels of the query examples at
meta-test time.
"""
def __init__(self,
learner):
"""
Args:
learner : a MyLearner object that encapsulates the fine-tuned
neural network.
"""
super().__init__()
self.learner = learner
def process_imgs(self, images):
to_torch_imgs = lambda a: torch.from_numpy(
np.transpose(a.numpy(), (0, 3, 1, 2)))
return to_torch_imgs(images)
def predict(self, dataset_test):
""" Predicts labels of the query set examples associated to a task.
Note that the query set is a tf.data.Dataset containing 50 examples for
the Omniglot dataset.
Args:
dataset_test : a tf.data.Dataset object. An iterator over the
unlabelled query examples.
Returns:
preds : tensors, shape (num_examples, N_ways). We are using the
Sparse Categorical Accuracy to evaluate the predictions. Valid
tensors can take 2 different forms described below.
Case 1 : The i-th prediction row contains the i-th example logits.
Case 2 : The i-th prediction row contains the i-th example
probabilities.
Since in both cases the SparseCategoricalAccuracy behaves the same way,
i.e. taking the argmax of the row inputs, both forms are valid.
Note : In the challenge N_ways = 5 at meta-test time.
"""
self.learner.eval()
for images in dataset_test:
            logging.debug('Query images shape : {}'.format(images[0].shape))
images = self.process_imgs(images[0])
qry_logits = self.learner(images).detach()
return qry_logits
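# --- Hedged illustration (not part of the original agent) -------------------
# End-to-end sketch of the meta-learning API shown above. Assumptions: the
# enclosing meta-learner class is exposed as MyMetaLearner (metadl convention),
# and 'meta_dataset_generator', 'support_set' and 'query_set' are hypothetical
# inputs provided by the challenge ingestion program; hyperparameter values are
# illustrative only.
def _run_pipeline_sketch(meta_dataset_generator, support_set, query_set):
    meta_learner = MyMetaLearner(meta_iterations=100, meta_batch_size=2,
                                 support_batch_size=5, query_batch_size=19,
                                 img_size=28, N_ways=5)
    learner = meta_learner.meta_fit(meta_dataset_generator)  # meta-training
    predictor = learner.fit(support_set)                     # per-task adaptation
    return predictor.predict(query_set)                      # (num_examples, N_ways) logits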
|
from cloudshell.shell.core.driver_context import ResourceCommandContext, AutoLoadDetails, AutoLoadAttribute, \
AutoLoadResource
from collections import defaultdict
class LegacyUtils(object):
def __init__(self):
self._datamodel_clss_dict = self.__generate_datamodel_classes_dict()
def migrate_autoload_details(self, autoload_details, context):
model_name = context.resource.model
root_name = context.resource.name
root = self.__create_resource_from_datamodel(model_name, root_name)
attributes = self.__create_attributes_dict(autoload_details.attributes)
self.__attach_attributes_to_resource(attributes, '', root)
        self.__build_sub_resources_hierarchy(root, autoload_details.resources, attributes)
return root
def __create_resource_from_datamodel(self, model_name, res_name):
return self._datamodel_clss_dict[model_name](res_name)
def __create_attributes_dict(self, attributes_lst):
d = defaultdict(list)
for attribute in attributes_lst:
d[attribute.relative_address].append(attribute)
return d
    def __build_sub_resources_hierarchy(self, root, sub_resources, attributes):
d = defaultdict(list)
for resource in sub_resources:
splitted = resource.relative_address.split('/')
parent = '' if len(splitted) == 1 else resource.relative_address.rsplit('/', 1)[0]
rank = len(splitted)
d[rank].append((parent, resource))
self.__set_models_hierarchy_recursively(d, 1, root, '', attributes)
    def __set_models_hierarchy_recursively(self, rank_map, rank, manipulated_resource, resource_relative_addr, attributes):
        if rank not in rank_map:  # no resources at this depth, stop the recursion
            return
        for (parent, resource) in rank_map[rank]:
            if parent == resource_relative_addr:
                sub_resource = self.__create_resource_from_datamodel(
                    resource.model.replace(' ', ''),
                    resource.name)
                self.__attach_attributes_to_resource(attributes, resource.relative_address, sub_resource)
                manipulated_resource.add_sub_resource(
                    self.__slice_parent_from_relative_path(parent, resource.relative_address), sub_resource)
                self.__set_models_hierarchy_recursively(
                    rank_map,
                    rank + 1,
                    sub_resource,
                    resource.relative_address,
                    attributes)
def __attach_attributes_to_resource(self, attributes, curr_relative_addr, resource):
for attribute in attributes[curr_relative_addr]:
setattr(resource, attribute.attribute_name.lower().replace(' ', '_'), attribute.attribute_value)
del attributes[curr_relative_addr]
    def __slice_parent_from_relative_path(self, parent, relative_addr):
        if parent == '':
            return relative_addr
        return relative_addr[len(parent) + 1:]  # + 1 to also strip the separator
def __generate_datamodel_classes_dict(self):
return dict(self.__collect_generated_classes())
def __collect_generated_classes(self):
import sys, inspect
return inspect.getmembers(sys.modules[__name__], inspect.isclass)
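# --- Hedged illustration (not part of the generated shell) ------------------
# Sketch of how LegacyUtils is typically driven: 'autoload_details' and
# 'context' are hypothetical inputs (an AutoLoadDetails object and the
# ResourceCommandContext it was produced for).
def _rebuild_resource_tree(autoload_details, context):
    utils = LegacyUtils()
    # Returns the root datamodel instance with attributes and sub-resources attached.
    return utils.migrate_autoload_details(autoload_details, context)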
class AdminAnsibleConfig2G(object):
def __init__(self, name):
"""
"""
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'Admin Ansible Config 2G'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
        Creates an instance of AdminAnsibleConfig2G from the given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
        :rtype: AdminAnsibleConfig2G
"""
result = AdminAnsibleConfig2G(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
        :return: AutoLoadDetails describing this resource and its sub-resources
        :rtype: AutoLoadDetails
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
        :param child_path: Path of a model within its parent model, e.g. '1'
        :type child_path: str
        :param parent_path: Full path of the parent model, e.g. '1/1'. Might be empty for the root model
        :type parent_path: str
        :return: Combined path
        :rtype: str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
        :rtype: AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
"""
Returns the name of the Cloudshell model
:return:
"""
return 'AdminAnsibleConfig2G'
@property
def address(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Address'] if 'Admin Ansible Config 2G.Address' in self.attributes else None
@address.setter
def address(self, value):
"""
(Optional) Address of Script Repo Server.
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Address'] = value
@property
def repo_user(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Repo User'] if 'Admin Ansible Config 2G.Repo User' in self.attributes else None
@repo_user.setter
def repo_user(self, value):
"""
(Optional) Source Control user for private repo authentication. Required for Github Private Repo. For Gitlab user not required, only access token in password field.
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Repo User'] = value
@property
def repo_password(self):
"""
        :rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Repo Password'] if 'Admin Ansible Config 2G.Repo Password' in self.attributes else None
@repo_password.setter
def repo_password(self, value):
"""
(Optional) Source Control password for private repo authentication. For GitLab, add private access token here.
        :type value: str
"""
self.attributes['Admin Ansible Config 2G.Repo Password'] = value
@property
def playbook_base_path(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Playbook Base Path'] if 'Admin Ansible Config 2G.Playbook Base Path' in self.attributes else None
@playbook_base_path.setter
def playbook_base_path(self, value):
"""
Base URL to script. This path will join with script path passed to execute playbook command. (Github - https://raw.githubusercontent.com/QualiSystemsLab/App-Configuration-Demo-Scripts/master/, Gitlab - http://<SERVER_IP>/api/v4/projects/<PROJECT_ID>/repository/files)
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Playbook Base Path'] = value
@property
def playbook_script_path(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Playbook Script Path'] if 'Admin Ansible Config 2G.Playbook Script Path' in self.attributes else None
@playbook_script_path.setter
def playbook_script_path(self, value):
"""
Path to script from root of repo. This will join with base path to create full URL.
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Playbook Script Path'] = value
@property
def playbook_url_full(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Playbook URL Full'] if 'Admin Ansible Config 2G.Playbook URL Full' in self.attributes else None
@playbook_url_full.setter
def playbook_url_full(self, value):
"""
Full path URL of script. For Github can be "raw" url. For gitlab, pass Rest API formatted url (Github - https://raw.githubusercontent.com/QualiSystemsLab/App-Configuration-Demo-Scripts/master/<FILE_PATH>, Gitlab - http://<SERVER_IP>/api/v4/projects/<PROJECT_ID>/repository/files/<FILE_PATH>/raw?ref=<BRANCH>)
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Playbook URL Full'] = value
@property
def connection_method(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Connection Method'] if 'Admin Ansible Config 2G.Connection Method' in self.attributes else None
@connection_method.setter
def connection_method(self, value='ssh'):
"""
Specifies the "ansible_connection" host variable
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Connection Method'] = value
@property
def script_parameters(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Script Parameters'] if 'Admin Ansible Config 2G.Script Parameters' in self.attributes else None
@script_parameters.setter
def script_parameters(self, value):
"""
(Optional) key pair values passed to HOST_VARS. Can pass simple arguments in this format (ansible_var1,val1;ansible_var2,val2) or JSON format for nested data structures (a dictionary or list of dictionaries accepted).
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Script Parameters'] = value
@property
def inventory_groups(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Inventory Groups'] if 'Admin Ansible Config 2G.Inventory Groups' in self.attributes else None
@inventory_groups.setter
def inventory_groups(self, value):
"""
(Optional) Designating groups in playbook to be executed.
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Inventory Groups'] = value
@property
def ansible_cmd_args(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Ansible CMD Args'] if 'Admin Ansible Config 2G.Ansible CMD Args' in self.attributes else None
@ansible_cmd_args.setter
def ansible_cmd_args(self, value):
"""
(Optional) Additional arguments passed to ansible-playbook command line execution.
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Ansible CMD Args'] = value
@property
def timeout_minutes(self):
"""
:rtype: float
"""
return self.attributes['Admin Ansible Config 2G.Timeout Minutes'] if 'Admin Ansible Config 2G.Timeout Minutes' in self.attributes else None
@timeout_minutes.setter
def timeout_minutes(self, value='10'):
"""
(Optional) Minutes to wait while polling target hosts.
:type value: float
"""
self.attributes['Admin Ansible Config 2G.Timeout Minutes'] = value
@property
def gitlab_branch(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Gitlab Branch'] if 'Admin Ansible Config 2G.Gitlab Branch' in self.attributes else None
@gitlab_branch.setter
def gitlab_branch(self, value):
"""
(Optional) Defaults to master branch. This attribute relevant for downloading from non-master branches in Gitlab repos.
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Gitlab Branch'] = value
@property
def ansible_config_selector(self):
"""
:rtype: str
"""
return self.attributes['Admin Ansible Config 2G.Ansible Config Selector'] if 'Admin Ansible Config 2G.Ansible Config Selector' in self.attributes else None
@ansible_config_selector.setter
def ansible_config_selector(self, value):
"""
(Optional) An alternative to connectors. Create and match this attribute value on target resources. Both matching selector and connected resources will run together.
:type value: str
"""
self.attributes['Admin Ansible Config 2G.Ansible Config Selector'] = value
@property
def name(self):
"""
:rtype: str
"""
return self._name
@name.setter
def name(self, value):
"""
:type value: str
"""
self._name = value
@property
def cloudshell_model_name(self):
"""
:rtype: str
"""
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
"""
:type value: str
"""
self._cloudshell_model_name = value
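# --- Hedged illustration (not part of the generated shell) ------------------
# Sketch of a typical autoload flow for this data model: the function name is
# illustrative, and 'context' is the ResourceCommandContext passed by
# CloudShell at autoload time.
def _example_get_inventory(context):
    resource = AdminAnsibleConfig2G.create_from_context(context)
    # Attribute values could be adjusted here before reporting inventory, e.g.:
    # resource.connection_method = 'ssh'
    return resource.create_autoload_details()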
|
# Copyright (c) 2022, Josef Engelskirchen and Contributors
# See license.txt
# import frappe
import unittest
class TestComplaintText(unittest.TestCase):
pass
|