hexsha (stringlengths 40–40) | size (int64 3–1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3–972) | max_stars_repo_name (stringlengths 6–130) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 3–972) | max_issues_repo_name (stringlengths 6–130) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 3–972) | max_forks_repo_name (stringlengths 6–130) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 3–1.03M) | avg_line_length (float64 1.13–941k) | max_line_length (int64 2–941k) | alphanum_fraction (float64 0–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb379a5d54a44233082012b9017978ed60b5aae8 | 225 | py | Python | smurf.py | haelee/allbypythonself | 499b1c696df8cac20e863354e7c6b68e9d4bff07 | ["MIT"] | 3 | 2019-09-23T03:33:10.000Z | 2020-07-16T06:51:46.000Z | smurf.py | haelee/allbypythonself | 499b1c696df8cac20e863354e7c6b68e9d4bff07 | ["MIT"] | null | null | null | smurf.py | haelee/allbypythonself | 499b1c696df8cac20e863354e7c6b68e9d4bff07 | ["MIT"] | 4 | 2019-09-23T04:55:27.000Z | 2021-05-22T01:09:40.000Z |
# All-by-Pythonself
# Snippet for the Smurf attacks
# by Hae Young Lee
# at Cheongju University
from scapy.all import *
p = Ether(dst="ff:ff:ff:ff:ff:ff") / IP(dst="10.0.2.255", src="10.0.2.1") / ICMP()
sendp(p)
| 25 | 91 | 0.631111 |
64807847b18a66195c0aa32c57ae22680c02cd64 | 2,339 | py | Python | CSCA48 - Introduction to CS 2/ex10.py | zaind6/University-CS-Exercises | 2c48a35f2b9e8b96cfc1384e225ced94ae00badc | ["MIT"] | 1 | 2020-11-03T01:35:35.000Z | 2020-11-03T01:35:35.000Z | CSCA48 - Introduction to CS 2/ex10.py | zain-zafar/University-CS-Exercises | 2c48a35f2b9e8b96cfc1384e225ced94ae00badc | ["MIT"] | null | null | null | CSCA48 - Introduction to CS 2/ex10.py | zain-zafar/University-CS-Exercises | 2c48a35f2b9e8b96cfc1384e225ced94ae00badc | ["MIT"] | null | null | null |
def radix_sort(main_bin):
'''(list of int) -> list of int
REQ: list contains all positive integers or 0
>>> radix_sort([1,2,3,1,2,3,4,0,4])
    [0, 1, 1, 2, 2, 3, 3, 4, 4]
Return a sorted list, using radix method
'''
# Initialize 10 bins
bin_0, bin_1, bin_2, bin_3, bin_4 = [], [], [], [], []
bin_5, bin_6, bin_7, bin_8, bin_9 = [], [], [], [], []
# Find the number of largest digit place
biggest = len(str(max(main_bin)))
    # Create an empty list
holder = []
# Make all elements the same length by adding zeros to the ones with length
# less than biggest
for i in main_bin:
i = str(i)
while len(i) != biggest:
i = '0' + str(i)
holder.append(i)
# Starting from 1, all the way to the last values of the elements,
# sort them into their bins
for counter in range(1, biggest + 1):
for elements in holder:
store = elements[-counter]
if store == '0':
bin_0.append(elements)
elif store == '1':
bin_1.append(elements)
elif store == '2':
bin_2.append(elements)
elif store == '3':
bin_3.append(elements)
elif store == '4':
bin_4.append(elements)
elif store == '5':
bin_5.append(elements)
elif store == '6':
bin_6.append(elements)
elif store == '7':
bin_7.append(elements)
elif store == '8':
bin_8.append(elements)
elif store == '9':
bin_9.append(elements)
# Set Main bin as the sum of the bins from 0 - 9
main_bin = bin_0 + bin_1 + bin_2 + bin_3 + bin_4 +\
bin_5 + bin_6 + bin_7 + bin_8 + bin_9
# Set holder as the main bin
holder = main_bin
# Clear all the values inside the bins
bin_0, bin_1, bin_2, bin_3, bin_4 = [], [], [], [], []
bin_5, bin_6, bin_7, bin_8, bin_9 = [], [], [], [], []
# Once all the digits have been checked and the elements have been placed
# into bins, then turn the main bin into a list of int, which will
# remove all the extra 0's
main_bin = list(map(int, main_bin))
# Return the sorted main bin
return main_bin
| 29.2375 | 79 | 0.525438 |
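The radix_sort exercise above can be sanity-checked against Python's built-in sorted; a minimal sketch, assuming the function has been pasted into or imported by the same module (this snippet is not part of the dataset row):

import random
# Illustrative check: the LSD radix sort should agree with sorted() on non-negative ints.
sample = [random.randint(0, 9999) for _ in range(100)]
assert radix_sort(sample) == sorted(sample)
print(radix_sort([1, 2, 3, 1, 2, 3, 4, 0, 4]))  # [0, 1, 1, 2, 2, 3, 3, 4, 4]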
9d615ddd13ef816a01f3d701702dd01ffc8337e0 | 8,670 | py | Python | macgraph/cell/messaging_cell.py | Octavian-ai/mac-graph | 3ef978e8a6f79f2dcc46783d34f01934aabf7f19 | ["Unlicense"] | 116 | 2018-07-11T13:19:56.000Z | 2021-07-26T17:22:44.000Z | macgraph/cell/messaging_cell.py | Octavian-ai/mac-graph | 3ef978e8a6f79f2dcc46783d34f01934aabf7f19 | ["Unlicense"] | 1 | 2019-02-11T02:25:02.000Z | 2019-02-11T17:05:19.000Z | macgraph/cell/messaging_cell.py | Octavian-ai/mac-graph | 3ef978e8a6f79f2dcc46783d34f01934aabf7f19 | ["Unlicense"] | 21 | 2018-10-11T23:03:22.000Z | 2021-07-14T22:42:08.000Z |
from typing import NamedTuple
import tensorflow as tf
from .types import *
from .query import *
from .messaging_cell_helpers import *
from ..args import ACTIVATION_FNS
from ..attention import *
from ..input import get_table_with_embedding
from ..const import EPSILON
from ..util import *
from ..layers import *
from ..activations import *
def lerp (a, b, f):
return (1-f)*a + f*b
class MessagingCell(Component):
def __init__(self, args):
super().__init__(args, name="mp")
# self.question_tokens = Tensor("question_tokens")
# self.read_gs_attn = AttentionByIndex(args,
# table=self.question_tokens,
# seq_len=args["max_seq_len"],
# table_representation="src", name="read_gs_attn")
def forward(self, features, context):
node_table, node_table_width, node_table_len = get_table_with_embedding(context.args, context.features, context.vocab_embedding, "kb_node")
in_signal = tf.concat([context.control_state, context.in_iter_id], -1)
control_parts = tf.reshape(context.control_state, [context.features["d_batch_size"], -1, context.args["input_width"]])
taps = {}
def add_taps(val, prefix):
ret,tps = val
for k,v in tps.items():
taps[prefix+"_"+k] = v
return ret
in_write_signal = layer_dense(in_signal, context.args["mp_state_width"], "sigmoid")
# in_write_signal = tf.ones([context.features["d_batch_size"], context.args["mp_state_width"]])
# Read/Write queries
# in_write_query = context.control_state
# in_write_query = layer_dense(context.control_state, node_table_width)
# in_write_query = context.in_question_tokens[:,10,:]
in_write_query = add_taps(generate_token_index_query(context, "write_query"), "write_query")
# in_read0_query = context.in_question_tokens[:,14,:]
# in_read0_query = control_parts[:,0,:]
# in_read0_query = tf.layers.dense(generate_query(context, "mp_read_query")[0], node_table_width)
read_queries = []
for i in range(context.args["mp_read_heads"]):
read_queries.append(add_taps(generate_token_index_query(context, f"read{i}_query"), f"read{i}_query"))
# self.question_tokens.bind(context.in_question_tokens_padded)
# global_signal = self.read_gs_attn.forward(features)
global_signal = context.in_question_tokens[:,26,:] # just the cleanliness signal
out_read_signals, node_state, taps2 = self.do_messaging_cell(context,
node_table, node_table_width, node_table_len,
in_write_query, in_write_signal, read_queries, global_signal)
self._taps = {**taps, **taps2}
return out_read_signals, node_state
def taps(self):
return self._taps
def tap_sizes(self):
t = {}
mp_reads = [f"read{i}" for i in range(self.args["mp_read_heads"])]
for mp_head in ["write", *mp_reads]:
t[f"{mp_head}_attn"] = self.args["kb_node_max_len"]
t[f"{mp_head}_attn_raw"] = self.args["kb_node_max_len"]
t[f"{mp_head}_query"] = self.args["kb_node_width"] * self.args["embed_width"]
t[f"{mp_head}_signal"] = self.args["mp_state_width"]
t[f"{mp_head}_query_token_index_attn" ] = self.args["max_seq_len"]
return t
def do_messaging_cell(self, context:CellContext,
node_table, node_table_width, node_table_len,
in_write_query, in_write_signal, in_read_queries, global_signal):
'''
Operate a message passing cell
Each iteration it'll do one round of message passing
Returns: read_signal, node_state
for to_node in nodes:
to_node.state = combine_incoming_signals([
message_pass(from_node, to_node) for from_node in to_node.neighbors
] + [node_self_update(to_node)])
'''
with tf.name_scope("messaging_cell"):
taps = {}
taps["write_query"] = in_write_query
taps["write_signal"] = in_write_signal
node_state_shape = tf.shape(context.in_node_state)
node_state = context.in_node_state
padded_node_table = pad_to_table_len(node_table, node_state, "padded_node_table")
node_ids_width = self.args["embed_width"]
node_ids = node_table[:,:,0:node_ids_width]
padded_node_ids =padded_node_table[:,:,0:node_ids_width]
node_ids_len = node_table_len
# --------------------------------------------------------------------------
# Write to graph
# --------------------------------------------------------------------------
write_signal, _, a_taps = attention_write_by_key(
keys =node_ids,
key_width=node_ids_width,
keys_len =node_ids_len,
query=in_write_query,
value=in_write_signal,
name="write_signal"
)
for k,v in a_taps.items():
taps["write_"+k] = v
write_signal = pad_to_table_len(write_signal, node_state, "write_signal")
node_state += write_signal
node_state = dynamic_assert_shape(node_state, node_state_shape, "node_state")
# --------------------------------------------------------------------------
# Calculate adjacency
# --------------------------------------------------------------------------
node_incoming = calc_normalized_adjacency(context, node_state)
if context.args["use_mp_right_shift"]:
node_incoming = calc_right_shift(node_incoming)
# --------------------------------------------------------------------------
# Perform propagation
# --------------------------------------------------------------------------
if context.args["use_mp_gru"]:
node_state = self.node_cell(context, node_state, node_incoming, padded_node_table, global_signal)
else:
node_state = node_incoming
# --------------------------------------------------------------------------
# Read from graph
# --------------------------------------------------------------------------
out_read_signals = []
for idx, qry in enumerate(in_read_queries):
out_read_signal, _, a_taps = attention_key_value(
keys =padded_node_ids,
keys_len =node_ids_len,
key_width=node_ids_width,
query=qry,
table=node_state,
name=f"read{idx}"
)
out_read_signals.append(out_read_signal)
for k,v in a_taps.items():
taps[f"read{idx}_{k}"] = v
taps[f"read{idx}_signal"] = out_read_signal
taps[f"read{idx}_query"] = qry
taps["node_state"] = node_state
node_state = dynamic_assert_shape(node_state, node_state_shape, "node_state")
assert node_state.shape[-1] == context.in_node_state.shape[-1], "Node state should not lose dimension"
return out_read_signals, node_state, taps
def node_cell(self, context, node_state, node_incoming, padded_node_table, global_signal):
# --------------------------------------------------------------------------
# Sizes
# --------------------------------------------------------------------------
seq_len = padded_node_table.shape[1]
n_features = self.args["kb_node_width"]
feature_width = self.args["embed_width"]
# --------------------------------------------------------------------------
# Global signal comparison
# --------------------------------------------------------------------------
node_properties = tf.reshape(padded_node_table,
[context.features["d_batch_size"], seq_len, n_features, feature_width])
node_cleanliness = node_properties[:,:,1,:]
# node_cleanliness = node_dense(node_cleanliness, feature_width, activation="selu", name="node_cleanliness")
node_cleanliness_tgt = tf.expand_dims(global_signal, 1)
w1 = tf.get_variable("w1", [1])
w2 = tf.get_variable("w2", [1])
b1 = tf.get_variable("b1", [1])
b2 = tf.get_variable("b2", [1])
node_cleanliness = node_cleanliness * w1 + b1
node_cleanliness_tgt = node_cleanliness_tgt * w2 + b2
node_cleanliness_score = tf.reduce_sum(node_cleanliness * node_cleanliness_tgt, axis=2, keepdims=True)
node_cleanliness_score = dynamic_assert_shape(node_cleanliness_score,
[context.features["d_batch_size"], seq_len, 1])
# node_cleanliness_score = node_dense(node_cleanliness_score, 1, activation="selu", name="node_cleanliness_score")
# --------------------------------------------------------------------------
# RNN Cell
# --------------------------------------------------------------------------
all_inputs = [node_state, node_incoming]
# all_inputs.append(padded_node_table)
# all_inputs.append(tf.tile(tf.expand_dims(global_signal,1), [1, node_state.shape[1], 1]))
all_inputs.append(node_cleanliness_score)
all_inputs = tf.concat(all_inputs, axis=-1)
signals = {}
for s in ["forget"]:
signals[s] = node_dense(all_inputs, context.args["mp_state_width"], activation="sigmoid", name=s+"_signal")
if self.args["use_summary_scalar"]:
tf.summary.histogram("mp_"+s, signals[s])
out_node_state = node_incoming * signals["forget"]
return out_node_state
| 31.758242 | 141 | 0.632641 |
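The do_messaging_cell docstring above sketches one round of message passing in pseudocode; the snippet below shows the same idea on a toy three-node graph with plain NumPy (names and shapes are illustrative only, not the module's API):

import numpy as np

# Toy message-passing round: each node's new state is the mean of its neighbours' states.
adjacency = np.array([[0, 1, 0],
                      [1, 0, 1],
                      [0, 1, 0]], dtype=np.float32)  # 3-node path graph
node_state = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)  # one feature per node
degree = adjacency.sum(axis=1, keepdims=True)
incoming = adjacency @ node_state / np.maximum(degree, 1.0)
print(incoming)  # the middle node receives the average of its two neighbours: 2.0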
0218062dd8a9e1268fc061e53a57cb713ec09290 | 156 | py | Python | funt.py | Prashant269/python | facf2683c20ace046e8c2adcd7fe96aad609331d | ["bzip2-1.0.6"] | null | null | null | funt.py | Prashant269/python | facf2683c20ace046e8c2adcd7fe96aad609331d | ["bzip2-1.0.6"] | null | null | null | funt.py | Prashant269/python | facf2683c20ace046e8c2adcd7fe96aad609331d | ["bzip2-1.0.6"] | null | null | null |
def table(x):
i=1
for i in range(1,11):
print ('{}*{}={}'.format(x,i,x*i))
return ''
y=table(5)
print(y)
| 17.333333 | 50 | 0.378205 |
79ea19da8e13f62754545aa29a5ce706fcdb3cc2 | 7,769 | py | Python | alipay/aop/api/request/AlipayUserTradeSearchRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayUserTradeSearchRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayUserTradeSearchRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | ["Apache-2.0"] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserTradeSearchRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._alipay_order_no = None
self._end_time = None
self._merchant_order_no = None
self._order_from = None
self._order_status = None
self._order_type = None
self._page_no = None
self._page_size = None
self._start_time = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def alipay_order_no(self):
return self._alipay_order_no
@alipay_order_no.setter
def alipay_order_no(self, value):
self._alipay_order_no = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def merchant_order_no(self):
return self._merchant_order_no
@merchant_order_no.setter
def merchant_order_no(self, value):
self._merchant_order_no = value
@property
def order_from(self):
return self._order_from
@order_from.setter
def order_from(self, value):
self._order_from = value
@property
def order_status(self):
return self._order_status
@order_status.setter
def order_status(self, value):
self._order_status = value
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, value):
self._order_type = value
@property
def page_no(self):
return self._page_no
@page_no.setter
def page_no(self, value):
self._page_no = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.user.trade.search'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.alipay_order_no:
if hasattr(self.alipay_order_no, 'to_alipay_dict'):
params['alipay_order_no'] = json.dumps(obj=self.alipay_order_no.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['alipay_order_no'] = self.alipay_order_no
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = json.dumps(obj=self.end_time.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['end_time'] = self.end_time
if self.merchant_order_no:
if hasattr(self.merchant_order_no, 'to_alipay_dict'):
params['merchant_order_no'] = json.dumps(obj=self.merchant_order_no.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['merchant_order_no'] = self.merchant_order_no
if self.order_from:
if hasattr(self.order_from, 'to_alipay_dict'):
params['order_from'] = json.dumps(obj=self.order_from.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['order_from'] = self.order_from
if self.order_status:
if hasattr(self.order_status, 'to_alipay_dict'):
params['order_status'] = json.dumps(obj=self.order_status.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['order_status'] = self.order_status
if self.order_type:
if hasattr(self.order_type, 'to_alipay_dict'):
params['order_type'] = json.dumps(obj=self.order_type.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['order_type'] = self.order_type
if self.page_no:
if hasattr(self.page_no, 'to_alipay_dict'):
params['page_no'] = json.dumps(obj=self.page_no.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['page_no'] = self.page_no
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = json.dumps(obj=self.page_size.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['page_size'] = self.page_size
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = json.dumps(obj=self.start_time.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['start_time'] = self.start_time
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 31.710204 | 160 | 0.625048 |
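The request class above is little more than property boilerplate plus get_params; a minimal usage sketch, assuming the alipay-sdk-python package that provides these imports is installed (the field values are placeholders):

# Build a request and inspect the flattened parameter dict it would send.
req = AlipayUserTradeSearchRequest()
req.page_no = 1
req.page_size = 20
req.start_time = "2022-01-01 00:00:00"
req.end_time = "2022-01-31 23:59:59"
params = req.get_params()
print(params["page_no"], params["page_size"])  # 1 20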
c93e598ca6827fa3c26f0d965e1953857fda1679 | 5,483 | py | Python | 2015/python/2015-11.py | robjwells/adventofcode-solutions | 1c3aa376f1c779a69aa515ce70f0537e13f25eab | ["MIT"] | null | null | null | 2015/python/2015-11.py | robjwells/adventofcode-solutions | 1c3aa376f1c779a69aa515ce70f0537e13f25eab | ["MIT"] | null | null | null | 2015/python/2015-11.py | robjwells/adventofcode-solutions | 1c3aa376f1c779a69aa515ce70f0537e13f25eab | ["MIT"] | null | null | null |
#!/usr/bin/env python3
"""Advent of Code 2015, Day 11: Corporate Policy"""
import string
import aoc
import pytest
def validate_password(password):
"""Check password against the puzzle’s requirements
Passwords:
* must include one increasing straight of at least three
letters, like abc, bcd, cde, and so on, up to xyz.
They cannot skip letters; abd doesn't count.
* may not contain the letters i, o, or l
* must contain at least two different, non-overlapping
pairs of letters, like aa, bb, or zz.
Args:
password (str): The password to validate
Returns:
bool: True if the password satisfies all requirements
"""
windowed = ("".join(t) for t in zip(password, password[1:], password[2:]))
contains_straight = any(w in string.ascii_lowercase for w in windowed)
no_invalid_chars = not any(char in password for char in "iol")
pair_chars = {a for a, b in zip(password, password[1:]) if a == b}
enough_unique_pairs = len(pair_chars) >= 2
return contains_straight and no_invalid_chars and enough_unique_pairs
def clean_bad_letters(password):
"""Return a candidate password after checking for invalid characters
If password doesn't contain the characters i, o, or l it is returned
immediately.
If it does, the string returned is the next potentially valid password
after short-circuiting and skipping passwords containing the invalid
letter in that particular position.
For example:
xi -> xj
xix -> xja
xixyz -> xjaaa
"""
search_results = (password.find(char) for char in "iol")
bad_chars = [x for x in search_results if x != -1]
if not bad_chars:
return password
cut_pos = min(bad_chars)
new_letter = increment_letter(password[cut_pos])
count_a_to_add = len(password[cut_pos:]) - 1
return password[:cut_pos] + new_letter + "a" * count_a_to_add
def increment_letter(letter):
"""Return the character after `letter` in a restricted circular alphabet
This increments a single letter at a time: a becomes b,
z becomes a and so on.
i, o and l are excluded from the alphabet used as they are
    not allowed to appear in valid passwords according to the
problem description.
It is, however, safe to increment those restricted letters
using this function as a special case is made for them.
"""
restricted_dict = {"i": "j", "l": "m", "o": "p"}
if letter in restricted_dict:
return restricted_dict[letter]
ok_letters = "abcdefghjkmnpqrstuvwxyz"
current_index = ok_letters.index(letter)
is_final_index = current_index == len(ok_letters) - 1
new_index = 0 if is_final_index else current_index + 1
return ok_letters[new_index]
def increment_password(current_pw, index=None):
"""Create a new password by advancing letters in a circular fashion
Only the final letter is incremented (a -> b, z -> a), but earlier
letters will also be incremented if the final one wraps around
(from z to a). This is done by recursively calling increment_password,
with `index` the position to change.
See increment_letter for details on the (restricted) alphabet used.
"""
pw_list = list(current_pw)
increment_index = len(pw_list) - 1 if index is None else index
new_letter = increment_letter(pw_list[increment_index])
pw_list[increment_index] = new_letter
candidate = "".join(pw_list)
if new_letter == "a" and increment_index > 0:
candidate = increment_password(candidate, index=increment_index - 1)
return candidate
def new_password(current_password):
"""Find the next new password starting at current_password
Only valid passwords are returned, with the requirements being:
* must include one increasing straight of at least three
letters, like abc, bcd, cde, and so on, up to xyz.
They cannot skip letters; abd doesn't count.
* may not contain the letters i, o, or l
* must contain at least two different, non-overlapping
pairs of letters, like aa, bb, or zz.
Passwords must also be exactly eight letters long, but the
clear assumption in the problem is that existing passwords
are only ever that length, so there is no specific check
to maintain the eight-character limit (as there is no
specified response).
"""
candidate = clean_bad_letters(current_password)
if candidate == current_password:
candidate = increment_password(candidate)
while not validate_password(candidate):
candidate = increment_password(candidate)
return candidate
@pytest.mark.parametrize(
"invalid_pass",
[
"hijklmmn",
"abbceffg",
"abbcegjk",
],
)
def test_invalid_password(invalid_pass):
assert not validate_password(invalid_pass)
@pytest.mark.parametrize(
"valid_pass",
[
"abcdffaa",
"ghjaabcc",
],
)
def test_valid_password(valid_pass):
assert validate_password(valid_pass)
@pytest.mark.parametrize(
"old,new",
[
("abcdefgh", "abcdffaa"),
("ghijklmn", "ghjaabcc"),
],
)
def test_new_password(old, new):
assert new_password(old) == new
if __name__ == "__main__":
# Part one
puzzle_input = "vzbxkghb"
part_one_pw = new_password(puzzle_input)
print(part_one_pw)
# Part two
print(new_password(part_one_pw))
| 31.511494 | 78 | 0.68375 |
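The docstrings above spell out the password rules; a short check of the helpers, reusing values that already appear in the file's pytest cases:

# Expected outputs are taken from the test cases in the same file.
print(validate_password("abcdffaa"))  # True
print(validate_password("hijklmmn"))  # False: contains 'i' and 'l'
print(increment_letter("z"))          # 'a' (the restricted alphabet wraps around)
print(new_password("abcdefgh"))       # 'abcdffaa'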
cc71b8f23bce8453e198949e4a42c7aae0c6e16e | 3,708 | py | Python | contrib/macdeploy/custom_dsstore.py | ZIBIZ-PROJECT/ZIBIZCORE-WEB | 436e437f61d19fdf05e6069ae4ccd8e1895f6259 | ["MIT"] | null | null | null | contrib/macdeploy/custom_dsstore.py | ZIBIZ-PROJECT/ZIBIZCORE-WEB | 436e437f61d19fdf05e6069ae4ccd8e1895f6259 | ["MIT"] | 2 | 2021-05-13T12:26:52.000Z | 2021-05-13T16:35:51.000Z | contrib/macdeploy/custom_dsstore.py | ZIBIZ-PROJECT/ZIBIZCORE-WEB | 436e437f61d19fdf05e6069ae4ccd8e1895f6259 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['zibiz-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.8 | 1,817 | 0.72411 |
9e8ce64af2d12c1dbb7c3f46438f87f53811971e | 2,068 | py | Python | run_tflite_convertor.py | jwkanggist/tflite-convertor-example | 5d54f1ca9214e1b3cdce7530838bac32b6b0c83d | ["Apache-2.0"] | 6 | 2018-11-13T16:45:52.000Z | 2020-04-28T01:27:27.000Z | run_tflite_convertor.py | neties/tflite-convertor-example | 5d54f1ca9214e1b3cdce7530838bac32b6b0c83d | ["Apache-2.0"] | 1 | 2018-06-14T16:58:15.000Z | 2018-06-14T17:04:39.000Z | run_tflite_convertor.py | neties/tflite-convertor-example | 5d54f1ca9214e1b3cdce7530838bac32b6b0c83d | ["Apache-2.0"] | 5 | 2018-09-01T14:40:21.000Z | 2019-09-22T15:13:37.000Z |
#-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
filename: run_tflite_convertor.py
description:
- To convert tensorflow frozen graph to tflite format
references:
- https://github.com/tensorflow/tensorflow/blob/master/tensorflow/docs_src/mobile/tflite/devguide.md#2-convert-the-model-format
- https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md#savedmodel
author: Jaewook Kang
date : 2018 Apr
'''
import sys
from os import getcwd
sys.path.insert(0, getcwd()+'/tflite-convertor/')
from tflite_convertor import TFliteConvertor
# your frozen graph pb
input_frozen_pb_path = getcwd()+'/pb_and_ckpt/lenet5/frozen_pb_out/'
sys.path.insert(0, input_frozen_pb_path)
# your dir for exporting tflite file
output_tflite_path = getcwd()+'/pb_and_ckpt/lenet5/tflite_out/'
# your dir path for tensorflow source
# where you need to fork tensorflow repo
#
PATH_TENSORFLOW_SRC = '/Users/jwkangmacpro2/SourceCodes/tensorflow/'
# The output/input node names are obtained from Tensorboard
output_node_names = 'model_out/Softmax'
input_node_names = 'input'
# input placeholder shape
input_shape_str = '1,28,28,1'
tflite_convertor = TFliteConvertor()
# tflite config
tflite_convertor.set_config_for_tflite(input_dir_path =input_frozen_pb_path,
output_dir_path =output_tflite_path,
input_pb_file ='frozen_tf_graph_def_lenet5.pb',
output_tflite_file ='tflite_lenet5.tflite',
inference_type ='FLOAT',
input_shape = input_shape_str,
input_array = input_node_names,
output_array = output_node_names,
tf_src_dir_path = PATH_TENSORFLOW_SRC)
# frozen graph to tflite conversion
tflite_convertor.convert_to_tflite_from_frozen_graph()
| 33.901639 | 135 | 0.652805 |
8c352f8149fc351751382b10600f0e5a47441cc1 | 2,571 | py | Python | test/magicmind/op_test/test_stack.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | ["BSD-2-Clause"] | 20 | 2022-03-01T11:40:51.000Z | 2022-03-30T08:17:47.000Z | test/magicmind/op_test/test_stack.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | ["BSD-2-Clause"] | null | null | null | test/magicmind/op_test/test_stack.py | Cambricon/catch | 2625da389f25a67066d20fb6b0c38250ef98f8ab | ["BSD-2-Clause"] | null | null | null |
from __future__ import print_function
import torch
import torch.nn as nn
import torch_mlu
import torch_mlu.core.mlu_model as ct
from torch.nn import Parameter
import torch.nn.functional as F
import numpy as np
import sys
import os
import time
import unittest
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir+"/../../")
from common_utils import testinfo, TestCase
import logging
logging.basicConfig(level=logging.DEBUG)
torch.set_grad_enabled(False)
class TestStackModel(nn.Module):
def __init__(self, dim):
super(TestStackModel, self).__init__()
self.dim = dim
def forward(self, x):
y = torch.stack([x,x,x], self.dim)
return y
class TestStackMixModel(nn.Module):
def __init__(self, dim, shape):
super(TestStackMixModel, self).__init__()
self.dim = dim
self.other = torch.randn(shape)
def forward(self, x):
y = torch.stack([x,x,self.other], self.dim)
return y
class TestStackOp(TestCase):
# @unittest.skip("not test")
def test_stack(self):
shapes = [(2),(2,4),(2,2,4),(2,3,4,4),(2,4,2,2,3)]
dims = [0, 1, 2, 3, 4,5]
for shape in shapes:
shape_len = 2 if isinstance(shape, int) else len(shape)+1
for dim in range(0,shape_len):
model = TestStackModel(dim).eval()
input_x = torch.randn(shape).float()
traced_model = torch.jit.trace(model, input_x, check_trace=False)
out_cpu = model(input_x)
input_x_mlu = input_x.to('mlu')
out_mlu = traced_model(input_x_mlu)
self.assertTensorsEqual(out_cpu, out_mlu.cpu(), 0.0, use_MSE = True)
# Test for fp16
traced_model.half()
out_mlu_fp16 = traced_model(input_x_mlu.half())
out_cpu_fp16 = model(input_x.half().float())
self.assertTensorsEqual(out_cpu_fp16, out_mlu_fp16.cpu(), 0.0, use_MSE = True)
# @unittest.skip("not test")
def test_stack_const(self):
dims = [0, 1, 2, 3]
for dim in dims:
model = TestStackMixModel(dim, (2,4,2)).eval()
input_x = torch.randn((2,4,2)).float()
traced_model = torch.jit.trace(model, input_x, check_trace=False)
input_x_mlu = input_x.to('mlu')
out_cpu = model(input_x)
out_mlu = traced_model(input_x_mlu)
self.assertTensorsEqual(out_cpu, out_mlu.cpu(), 0.0, use_MSE = True)
if __name__ == '__main__':
unittest.main()
| 32.961538 | 94 | 0.614936 |
796ab7f38bd751e635a50dd2fe99e8e81c2840ab | 2,259 | py | Python | third_party/catapult/telemetry/telemetry/internal/forwarders/__init__.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | ["BSD-3-Clause"] | 1 | 2020-09-15T08:43:34.000Z | 2020-09-15T08:43:34.000Z | third_party/catapult/telemetry/telemetry/internal/forwarders/__init__.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | ["BSD-3-Clause"] | null | null | null | third_party/catapult/telemetry/telemetry/internal/forwarders/__init__.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | ["BSD-3-Clause"] | null | null | null |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
PortPair = collections.namedtuple('PortPair', ['local_port', 'remote_port'])
PortSet = collections.namedtuple('PortSet', ['http', 'https', 'dns'])
class PortPairs(collections.namedtuple('PortPairs', ['http', 'https', 'dns'])):
__slots__ = ()
@classmethod
def Zip(cls, local_ports, remote_ports):
"""Zip a pair of PortSet's into a single PortPairs object."""
with_dns = local_ports.dns is not None and remote_ports.dns is not None
return cls(
PortPair(local_ports.http, remote_ports.http),
PortPair(local_ports.https, remote_ports.https),
PortPair(local_ports.dns, remote_ports.dns) if with_dns else None)
@property
def local_ports(self):
"""Return a tuple of local ports only."""
return PortSet(*[p.local_port if p is not None else None for p in self])
@property
def remote_ports(self):
"""Return a tuple of remote ports only."""
return PortSet(*[p.remote_port if p is not None else None for p in self])
class ForwarderFactory(object):
def Create(self, port_pairs):
"""Creates a forwarder that maps remote (device) <-> local (host) ports.
Args:
port_pairs: A PortPairs instance that consists of a PortPair mapping
for each protocol. http is required. https and dns may be None.
"""
raise NotImplementedError()
@property
def host_ip(self):
return '127.0.0.1'
class Forwarder(object):
def __init__(self, port_pairs):
assert port_pairs.http, 'HTTP port mapping is required.'
self._port_pairs = PortPairs(*[
PortPair(p.local_port, p.remote_port or p.local_port)
if p else None for p in port_pairs])
self._forwarding = True
@property
def host_port(self):
return self._port_pairs.http.remote_port
@property
def host_ip(self):
return '127.0.0.1'
@property
def port_pairs(self):
return self._port_pairs
@property
def url(self):
assert self.host_ip and self.host_port
return 'http://%s:%i' % (self.host_ip, self.host_port)
def Close(self):
self._port_pairs = None
self._forwarding = False
| 28.594937 | 79 | 0.695883 |
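PortPairs.Zip above pairs up two PortSet instances; a small sketch of how that behaves (the port numbers are made up):

# Hypothetical port numbers purely for illustration.
local = PortSet(http=8080, https=8443, dns=None)
remote = PortSet(http=80, https=443, dns=53)
pairs = PortPairs.Zip(local, remote)
print(pairs.http)         # PortPair(local_port=8080, remote_port=80)
print(pairs.dns)          # None, because one side has no DNS port
print(pairs.local_ports)  # PortSet(http=8080, https=8443, dns=None)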
884cfdbc2720566163fea2f94b732403fe92e65e | 4,302 | py | Python | conans/test/unittests/tools/cmake/test_cmaketoolchain.py | blackliner/conan | 7848f7fcf1d0ce6e368f1dc05e4b20f40a9203c6 | ["MIT"] | null | null | null | conans/test/unittests/tools/cmake/test_cmaketoolchain.py | blackliner/conan | 7848f7fcf1d0ce6e368f1dc05e4b20f40a9203c6 | ["MIT"] | null | null | null | conans/test/unittests/tools/cmake/test_cmaketoolchain.py | blackliner/conan | 7848f7fcf1d0ce6e368f1dc05e4b20f40a9203c6 | ["MIT"] | null | null | null |
import types
import pytest
from mock import Mock
from conan.tools.cmake import CMakeToolchain
from conan.tools.cmake.toolchain import Block, GenericSystemBlock
from conans import ConanFile, Settings
from conans.model.conf import Conf
from conans.model.env_info import EnvValues
@pytest.fixture
def conanfile():
c = ConanFile(Mock(), None)
c.settings = "os", "compiler", "build_type", "arch"
c.initialize(Settings({"os": ["Windows"],
"compiler": {"gcc": {"libcxx": ["libstdc++"]}},
"build_type": ["Release"],
"arch": ["x86"]}), EnvValues())
c.settings.build_type = "Release"
c.settings.arch = "x86"
c.settings.compiler = "gcc"
c.settings.compiler.libcxx = "libstdc++"
c.conf = Conf()
c.folders.set_base_generators(".")
c._conan_node = Mock()
c._conan_node.dependencies = []
return c
def test_cmake_toolchain(conanfile):
toolchain = CMakeToolchain(conanfile)
content = toolchain.content
assert 'set(CMAKE_BUILD_TYPE "Release"' in content
def test_remove(conanfile):
toolchain = CMakeToolchain(conanfile)
toolchain.blocks.remove("generic_system")
content = toolchain.content
assert 'CMAKE_BUILD_TYPE' not in content
def test_template_remove(conanfile):
toolchain = CMakeToolchain(conanfile)
toolchain.blocks["generic_system"].template = ""
content = toolchain.content
assert 'CMAKE_BUILD_TYPE' not in content
def test_template_change(conanfile):
toolchain = CMakeToolchain(conanfile)
tmp = toolchain.blocks["generic_system"].template
toolchain.blocks["generic_system"].template = tmp.replace("CMAKE_BUILD_TYPE", "OTHER_THING")
content = toolchain.content
assert 'set(OTHER_THING "Release"' in content
def test_context_change(conanfile):
toolchain = CMakeToolchain(conanfile)
tmp = toolchain.blocks["generic_system"]
def context(self):
assert self
return {"build_type": "SuperRelease"}
tmp.context = types.MethodType(context, tmp)
content = toolchain.content
assert 'set(CMAKE_BUILD_TYPE "SuperRelease"' in content
def test_context_update(conanfile):
toolchain = CMakeToolchain(conanfile)
build_type = toolchain.blocks["generic_system"].values["build_type"]
toolchain.blocks["generic_system"].values["build_type"] = "Super" + build_type
content = toolchain.content
assert 'set(CMAKE_BUILD_TYPE "SuperRelease"' in content
def test_context_replace(conanfile):
toolchain = CMakeToolchain(conanfile)
toolchain.blocks["generic_system"].values = {"build_type": "SuperRelease"}
content = toolchain.content
assert 'set(CMAKE_BUILD_TYPE "SuperRelease"' in content
def test_replace_block(conanfile):
toolchain = CMakeToolchain(conanfile)
class MyBlock(Block):
template = "HelloWorld"
def context(self):
return {}
toolchain.blocks["generic_system"] = MyBlock
content = toolchain.content
assert 'HelloWorld' in content
assert 'CMAKE_BUILD_TYPE' not in content
def test_add_new_block(conanfile):
toolchain = CMakeToolchain(conanfile)
class MyBlock(Block):
template = "Hello {{myvar}}!!!"
def context(self):
return {"myvar": "World"}
toolchain.blocks["mynewblock"] = MyBlock
content = toolchain.content
assert 'Hello World!!!' in content
assert 'CMAKE_BUILD_TYPE' in content
def test_extend_block(conanfile):
toolchain = CMakeToolchain(conanfile)
class MyBlock(GenericSystemBlock):
template = "Hello {{build_type}}!!"
def context(self):
c = super(MyBlock, self).context()
c["build_type"] = c["build_type"] + "Super"
return c
toolchain.blocks["generic_system"] = MyBlock
content = toolchain.content
assert 'Hello ReleaseSuper!!' in content
assert 'CMAKE_BUILD_TYPE' not in content
def test_user_toolchain(conanfile):
toolchain = CMakeToolchain(conanfile)
toolchain.blocks["user_toolchain"].user_toolchain = "myowntoolchain.cmake"
content = toolchain.content
assert 'include(myowntoolchain.cmake)' in content
toolchain = CMakeToolchain(conanfile)
content = toolchain.content
assert 'include(' not in content
| 30.083916 | 96 | 0.693631 |
a9c95062446adc178d47745e215af1c6ae6bf12a | 4,784 | py | Python | ROAR/perception_module/ground_plane_detector.py | RyanC1681/RCAI1122 | c9683110b58c255a7a78d880ff73df7ff2329405 | ["Apache-2.0"] | 18 | 2020-10-16T00:38:55.000Z | 2022-03-03T06:01:49.000Z | ROAR/perception_module/ground_plane_detector.py | RyanC1681/RCAI1122 | c9683110b58c255a7a78d880ff73df7ff2329405 | ["Apache-2.0"] | 20 | 2020-07-23T03:50:50.000Z | 2021-11-09T04:00:26.000Z | ROAR/perception_module/ground_plane_detector.py | RyanC1681/RCAI1122 | c9683110b58c255a7a78d880ff73df7ff2329405 | ["Apache-2.0"] | 140 | 2019-11-20T22:46:02.000Z | 2022-03-29T13:26:17.000Z |
from ROAR.agent_module.agent import Agent
from ROAR.perception_module.depth_to_pointcloud_detector import DepthToPointCloudDetector
import numpy as np
from typing import Optional, Any
import open3d as o3d
import time, cv2
class GroundPlaneDetector(DepthToPointCloudDetector):
def __init__(self, agent: Agent, knn: int = 200, res: int = 4, **kwargs):
super().__init__(agent, **kwargs)
self.reference_norm: Optional[np.ndarray] = np.array([-0.00000283, -0.00012446, 0.99999999])
self.knn = knn
self.res = res
self.f1, self.f2, self.f3, self.f4 = self.compute_vectors_near_me(res)
self.threshold = 0.15
def run_in_series(self) -> Any:
if self.agent.kwargs.get("point_cloud", None) is not None:
try:
points: np.ndarray = self.agent.kwargs.get("point_cloud").copy()
x = points[self.f3, :] - points[self.f4, :]
y = points[self.f1, :] - points[self.f2, :]
normals = self.normalize_v3(np.cross(x, y))
# OpenCV FloodFill
d1 = h = self.agent.front_depth_camera.image_size_y
d2 = w = self.agent.front_depth_camera.image_size_x
curr_img = normals.reshape((int(d1/self.res), int(d2/self.res), 3)).astype(np.float32)
min_x, max_x = 0, h // self.res
min_y, max_y = w * 3 // 4 // self.res, w
# Y_norm_array: np.ndarray = curr_img[min_x:max_x, min_y:max_y, 1]
# x, y = np.unravel_index(np.argmax(Y_norm_array), np.shape(Y_norm_array))
# seed_w, seed_h = y + min_y, x + min_x
# print(seed_w, seed_h, np.shape(curr_img))
seed_point = (int(d1/self.res) - 10, int(int(d2/self.res) / 2))
_, retval, _, _ = cv2.floodFill(image=curr_img,
seedPoint=seed_point,
newVal=(0, 0, 0),
loDiff=(self.threshold,self.threshold,self.threshold),
upDiff=(self.threshold,self.threshold,self.threshold),
mask=None,
flags=8)
bool_matrix = np.mean(retval, axis=2) == 0
bool_zeros = np.zeros(d1 * d2).flatten()
bool_indices = np.indices(bool_zeros.shape)[0][::self.res**2]
bool_zeros[bool_indices] = bool_matrix.flatten()
bool_matrix = bool_zeros.reshape((d1, d2))
color_image = self.agent.front_rgb_camera.data.copy()
color_image[bool_matrix > 0] = 255
cv2.imshow('Color', color_image)
cv2.waitKey(1)
except Exception as e:
self.logger.error(e)
@staticmethod
def construct_pointcloud(points) -> o3d.geometry.PointCloud:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
pcd.estimate_normals()
return pcd
def compute_reference_norm(self, pcd: o3d.geometry.PointCloud):
pcd_tree = o3d.geometry.KDTreeFlann(pcd) # build KD tree for fast computation
[k, idx, _] = pcd_tree.search_knn_vector_3d(self.agent.vehicle.transform.location.to_array(),
knn=self.knn) # find points around me
points_near_me = np.asarray(pcd.points)[idx, :] # 200 x 3
u, s, vh = np.linalg.svd(points_near_me, full_matrices=False) # use svd to find normals of points
self.reference_norm = vh[2, :]
@staticmethod
def normalize_v3(arr):
lens = np.sqrt(arr[:, 0] ** 2 + arr[:, 1] ** 2 + arr[:, 2] ** 2)
lens[lens <= 0] = 1
arr[:, 0] /= lens
arr[:, 1] /= lens
arr[:, 2] /= lens
return arr
def compute_vectors_near_me(self, res):
d1, d2 = self.agent.front_depth_camera.image_size_y, self.agent.front_depth_camera.image_size_x
idx, jdx = np.indices((d1, d2))
idx_back = np.clip(idx - 1, 0, idx.max()).flatten()
idx_front = np.clip(idx + 1, 0, idx.max()).flatten()
jdx_back = np.clip(jdx - 1, 0, jdx.max()).flatten()
jdx_front = np.clip(jdx + 1, 0, jdx.max()).flatten()
idx = idx.flatten()
jdx = jdx.flatten()
# rand_idx = np.random.choice(np.arange(idx.shape[0]), size=d1*d2, replace=False)
f1 = (idx_front * d2 + jdx)[::res**2] # [rand_idx]
f2 = (idx_back * d2 + jdx)[::res**2] # [rand_idx]
f3 = (idx * d2 + jdx_front)[::res**2] # [rand_idx]
f4 = (idx * d2 + jdx_back)[::res**2] # [rand_idx]
return f1, f2, f3, f4
| 48.323232 | 106 | 0.549749 |
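normalize_v3 above is the one piece of the detector that is easy to exercise in isolation (it is a staticmethod); a quick NumPy check, assuming the ROAR package and its dependencies import cleanly, with arbitrary input values:

import numpy as np
vecs = np.array([[3.0, 0.0, 4.0],
                 [0.0, 0.0, 0.0]])  # second row has zero length
out = GroundPlaneDetector.normalize_v3(vecs)
print(out[0])  # [0.6 0.  0.8], now unit length
print(out[1])  # stays all zeros, since zero lengths are clamped to 1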
836a5abce814977f2c7eef91760399d9644f1bde | 2,469 | py | Python | sejong-oneline-festival/controller/sejong_auth.py | denhur62/sejong-online-festival | 69fbe16ff5ab4f97ff3cb298ce8d2a62d8f787fc | ["MIT"] | null | null | null | sejong-oneline-festival/controller/sejong_auth.py | denhur62/sejong-online-festival | 69fbe16ff5ab4f97ff3cb298ce8d2a62d8f787fc | ["MIT"] | null | null | null | sejong-oneline-festival/controller/sejong_auth.py | denhur62/sejong-online-festival | 69fbe16ff5ab4f97ff3cb298ce8d2a62d8f787fc | ["MIT"] | 4 | 2021-09-28T09:13:19.000Z | 2022-01-10T13:16:05.000Z |
import requests
from bs4 import BeautifulSoup as bs
class SejongAuth:
def __init__(self):
self.TIMEOUT_SEC = 10
def do_sejong(self, id: str, pw: str):
header = {
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5)\
AppleWebKit 537.36 (KHTML, like Gecko) Chrome",
"Accept":"text/html,application/xhtml+xml,application/xml;\
q=0.9,imgwebp,*/*;q=0.8"
}
data = {
'email': id,
'password': pw
}
with requests.Session() as s:
html = s.post(
"https://do.sejong.ac.kr/ko/process/member/login",
headers=header, data=data, timeout=self.TIMEOUT_SEC
).content
html = s.get(
"https://do.sejong.ac.kr/",
timeout=self.TIMEOUT_SEC
).text
soup = bs(html, "html.parser")
soup = soup.select("div.info")
if soup == []:
return {"result": False}
name = soup[0].find("b").get_text().strip()
major = soup[0].find("small").get_text().strip().split(" ")[1]
return {
"result": True,
"name": name,
"id": id,
"major": major
}
def portal_sejong(self, id: str, pw: str):
header = {
"Referer": "https://portal.sejong.ac.kr",
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0"
}
data = {
"id": id,
"password": pw,
'rtUrl': '',
}
with requests.Session() as s:
s.post(
'https://portal.sejong.ac.kr/jsp/login/login_action.jsp',
headers=header, data=data, timeout=self.TIMEOUT_SEC
)
res = s.get('https://portal.sejong.ac.kr/main.jsp', timeout=self.TIMEOUT_SEC)
soup = bs(res.content, 'html.parser')
name = soup.select_one('div.info0 > div')
if name is None:
return {"result":False}
name = name.get_text().split("(")[0]
return {
"result": True,
"name": name,
"id": id,
}
if __name__ == '__main__':
auth = SejongAuth()
id, pw = "16011089", "!hkw45799"
print(auth.do_sejong(id, pw))
print(auth.portal_sejong(id, pw))
| 32.064935 | 105 | 0.476711 |
0e0649003276f3cdf628badddf2fb90624867f33 | 5,101 | py | Python | octavia-cli/octavia_cli/generate/definitions.py | kattos-aws/airbyte | cbcbab4a2399c08d8f66d1b693ac824c245ba3da | ["MIT"] | null | null | null | octavia-cli/octavia_cli/generate/definitions.py | kattos-aws/airbyte | cbcbab4a2399c08d8f66d1b693ac824c245ba3da | ["MIT"] | 1 | 2021-12-08T21:39:05.000Z | 2021-12-09T17:10:45.000Z | octavia-cli/octavia_cli/generate/definitions.py | kattos-aws/airbyte | cbcbab4a2399c08d8f66d1b693ac824c245ba3da | ["MIT"] | 1 | 2022-02-19T17:22:50.000Z | 2022-02-19T17:22:50.000Z |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import abc
from typing import Any, Callable, Union
import airbyte_api_client
import click
from airbyte_api_client.api import (
destination_definition_api,
destination_definition_specification_api,
source_definition_api,
source_definition_specification_api,
)
from airbyte_api_client.exceptions import ApiException
from airbyte_api_client.model.destination_definition_id_request_body import DestinationDefinitionIdRequestBody
from airbyte_api_client.model.destination_definition_id_with_workspace_id import DestinationDefinitionIdWithWorkspaceId
from airbyte_api_client.model.source_definition_id_request_body import SourceDefinitionIdRequestBody
from airbyte_api_client.model.source_definition_id_with_workspace_id import SourceDefinitionIdWithWorkspaceId
class DefinitionNotFoundError(click.ClickException):
pass
class BaseDefinition(abc.ABC):
COMMON_GET_FUNCTION_KWARGS = {"_check_return_type": False}
specification = None
@property
@abc.abstractmethod
def api(
self,
): # pragma: no cover
pass
@property
@abc.abstractmethod
def type(
self,
): # pragma: no cover
pass
@property
@abc.abstractmethod
def get_function_name(
self,
): # pragma: no cover
pass
@property
def _get_fn(self) -> Callable:
return getattr(self.api, self.get_function_name)
@property
def _get_fn_kwargs(self) -> dict:
return {}
def __init__(self, api_client: airbyte_api_client.ApiClient, id: str) -> None:
self.id = id
self.api_instance = self.api(api_client)
self._api_data = self._read()
def _read(self) -> dict:
try:
return self._get_fn(self.api_instance, **self._get_fn_kwargs, **self.COMMON_GET_FUNCTION_KWARGS)
except ApiException as e:
if e.status in [422, 404]:
                raise DefinitionNotFoundError(f"Definition {self.id} does not exist on your Airbyte instance.")
raise e
def __getattr__(self, name: str) -> Any:
"""Map attribute of the API response to the BaseDefinition object.
Args:
name (str): Attribute name
Raises:
AttributeError: Raised if the attributed was not found in the API response payload.
Returns:
[Any]: Attribute value
"""
if name in self._api_data:
return self._api_data.get(name)
raise AttributeError(f"{self.__class__.__name__}.{name} is invalid.")
class ConnectionDefinition(BaseDefinition):
type = "connection"
class SourceDefinition(BaseDefinition):
api = source_definition_api.SourceDefinitionApi
type = "source"
get_function_name = "get_source_definition"
@property
def _get_fn_kwargs(self) -> dict:
return {"source_definition_id_request_body": SourceDefinitionIdRequestBody(self.id)}
class DestinationDefinition(BaseDefinition):
api = destination_definition_api.DestinationDefinitionApi
type = "destination"
get_function_name = "get_destination_definition"
@property
def _get_fn_kwargs(self) -> dict:
return {"destination_definition_id_request_body": DestinationDefinitionIdRequestBody(self.id)}
class DefinitionSpecification(BaseDefinition):
def __init__(self, api_client: airbyte_api_client.ApiClient, workspace_id: str, id: str) -> None:
self.workspace_id = workspace_id
super().__init__(api_client, id)
class SourceDefinitionSpecification(DefinitionSpecification):
api = source_definition_specification_api.SourceDefinitionSpecificationApi
type = "source"
get_function_name = "get_source_definition_specification"
@property
def _get_fn_kwargs(self) -> dict:
return {"source_definition_id_with_workspace_id": SourceDefinitionIdWithWorkspaceId(self.id, self.workspace_id)}
class DestinationDefinitionSpecification(DefinitionSpecification):
api = destination_definition_specification_api.DestinationDefinitionSpecificationApi
type = "destination"
get_function_name = "get_destination_definition_specification"
@property
def _get_fn_kwargs(self) -> dict:
return {"destination_definition_id_with_workspace_id": DestinationDefinitionIdWithWorkspaceId(self.id, self.workspace_id)}
def factory(
definition_type: str, api_client: airbyte_api_client.ApiClient, workspace_id: str, definition_id: str
) -> Union[SourceDefinition, DestinationDefinition]:
if definition_type == "source":
definition = SourceDefinition(api_client, definition_id)
specification = SourceDefinitionSpecification(api_client, workspace_id, definition_id)
elif definition_type == "destination":
definition = DestinationDefinition(api_client, definition_id)
specification = DestinationDefinitionSpecification(api_client, workspace_id, definition_id)
else:
raise ValueError(f"{definition_type} does not exist")
definition.specification = specification
return definition
| 33.123377 | 130 | 0.738483 |
d51b36eda668c5c1d18d56e2ea9a827d6304e79f | 2,686 | py | Python | noxfile.py | decalage2/Ciphey | ebe22af0a2ab5c21aaaa3913f8ff20e10149ca9e | ["MIT"] | 1 | 2020-10-28T18:37:23.000Z | 2020-10-28T18:37:23.000Z | noxfile.py | decalage2/Ciphey | ebe22af0a2ab5c21aaaa3913f8ff20e10149ca9e | ["MIT"] | null | null | null | noxfile.py | decalage2/Ciphey | ebe22af0a2ab5c21aaaa3913f8ff20e10149ca9e | ["MIT"] | 1 | 2021-09-18T13:21:00.000Z | 2021-09-18T13:21:00.000Z |
"""
The file for Nox
"""
import nox
from typing import Any
from nox.sessions import Session
import tempfile
locations = "ciphey/", "tests/", "docs/"
nox.options.sessions = "safety", "tests"
package = "ciphey"
def install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None:
"""Install packages constrained by Poetry's lock file.
This function is a wrapper for nox.sessions.Session.install. It
invokes pip to install packages inside of the session's virtualenv.
Additionally, pip is passed a constraints file generated from
Poetry's lock file, to ensure that the packages are pinned to the
versions specified in poetry.lock. This allows you to manage the
packages as Poetry development dependencies.
Arguments:
session: The Session object.
args: Command-line arguments for pip.
kwargs: Additional keyword arguments for Session.install.
"""
with tempfile.NamedTemporaryFile() as requirements:
session.run(
"poetry",
"export",
"--dev",
"--format=requirements.txt",
f"--output={requirements.name}",
external=True,
)
session.install(f"--constraint={requirements.name}", *args, **kwargs)
# noxfile.py
@nox.session(python="3.8")
def black(session):
args = session.posargs or locations
session.install("black")
session.run("black", *args)
@nox.session(python="3.8")
def safety(session):
with tempfile.NamedTemporaryFile() as requirements:
session.run(
"poetry",
"export",
"--dev",
"--format=requirements.txt",
"--without-hashes",
f"--output={requirements.name}",
external=True,
)
install_with_constraints(session, "safety")
session.run("safety", "check", f"--file={requirements.name}", "--full-report")
@nox.session(python="3.8")
def coverage(session: Session) -> None:
"""Upload coverage data."""
install_with_constraints(session, "coverage[toml]", "codecov")
session.run("pip3", "install", "cipheydists")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
# noxfile.py
@nox.session(python="3.8")
def docs(session: Session) -> None:
"""Build the documentation."""
install_with_constraints(session, "sphinx")
session.run("sphinx-build", "docs", "docs/_build")
# python=["3.8", "3.7", "3.6"])
@nox.session(python="3.8")
def tests(session):
session.run("pip3", "install", "cipheydists")
session.run("poetry", "install", external=True)
session.run("poetry", "run", "pytest", "--cov=ciphey")
| 31.232558 | 86 | 0.638124 |
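install_with_constraints above is written to be reused by every session; a sketch of one more session built the same way (the lint session and its flake8 dependency are hypothetical additions, not part of the original noxfile):

# Hypothetical extra session following the same pattern as black() and safety().
@nox.session(python="3.8")
def lint(session: Session) -> None:
    """Run flake8 with Poetry-pinned constraints."""
    args = session.posargs or locations
    install_with_constraints(session, "flake8")
    session.run("flake8", *args)
# Typical command-line invocations: nox -s tests, nox -s safety, nox -s lint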
d8aa6226743afa65bd3766c6ad8d2fc0f139f441 | 8,023 | py | Python | prednet_custom.py | pallekc91/prednet | a7761a92c98c1652a2559a2b84c7f0a371f8e5c4 | ["MIT"] | null | null | null | prednet_custom.py | pallekc91/prednet | a7761a92c98c1652a2559a2b84c7f0a371f8e5c4 | ["MIT"] | null | null | null | prednet_custom.py | pallekc91/prednet | a7761a92c98c1652a2559a2b84c7f0a371f8e5c4 | ["MIT"] | null | null | null |
from keras.layers import Recurrent
from keras.engine import InputSpec
from keras.layers import Conv2D, UpSampling2D, MaxPooling2D
from keras import backend
from keras import activations
class PredNetCustom(Recurrent):
def __init__(self, channels_a, channels_r, glob_filter_size, output_mode='error', data_format=backend.image_data_format(), **kwargs):
super(PredNetCustom, self).__init__(**kwargs)
self.conv_layers = {c: [] for c in ['i', 'f', 'c', 'o', 'a', 'ahat']}
        self.channels_a = channels_a  # size = n (first entry is the input channel count, followed by n-1 channel sizes)
self.channels_r = channels_r # size = n
self.layer_size = len(channels_r)
self.glob_filter_size = glob_filter_size
self.output_mode = output_mode
        assert len(channels_a) == len(self.channels_r), 'channels in a and r should be equal, please check your arguments'
assert data_format in {'channels_last',
'channels_first'}, 'data_format must be in {channels_last, channels_first}'
self.data_format = data_format
self.channel_axis = -3 if data_format == 'channels_first' else -1
self.row_axis = -2 if data_format == 'channels_first' else -3
self.column_axis = -1 if data_format == 'channels_first' else -2
for i in range(self.layer_size):
for c in ['i', 'f', 'c', 'o']:
act = 'tanh' if c == 'c' else 'sigmoid'
self.conv_layers[c].append(Conv2D(self.channels_r[i], self.glob_filter_size, padding='same', activation=act, data_format=self.data_format))
self.conv_layers['ahat'].append(Conv2D(self.channels_a[i], self.glob_filter_size, padding='same', activation='relu', data_format=self.data_format))
for i in range(1, self.layer_size):
self.conv_layers['a'].append(Conv2D(self.channels_a[i], self.glob_filter_size, padding='same', activation='relu', data_format=self.data_format))
self.upsample = UpSampling2D(data_format=self.data_format)
self.pool = MaxPooling2D(data_format=self.data_format)
self.input_spec = [InputSpec(ndim=5)]
def compute_output_shape(self, input_shape):
if self.output_mode == 'prediction':
out_shape = input_shape[2:]
elif self.output_mode == 'error':
out_shape = (self.layer_size,)
if self.return_sequences:
return (input_shape[0], input_shape[1]) + out_shape
else:
return (input_shape[0],) + out_shape
def get_initial_state(self, x):
input_shape = self.input_spec[0].shape
init_nb_row = input_shape[self.row_axis]
init_nb_col = input_shape[self.column_axis]
base_initial_state = backend.zeros_like(x)
non_channel_axis = -1 if self.data_format == 'channels_first' else -2
for _ in range(2):
base_initial_state = backend.sum(base_initial_state, axis=non_channel_axis)
base_initial_state = backend.sum(base_initial_state, axis=1) # (samples, nb_channels)
states_to_pass = ['r', 'c', 'e']
initial_states = []
nlayers_to_pass = {u: self.layer_size for u in states_to_pass}
for u in states_to_pass:
for l in range(nlayers_to_pass[u]):
ds_factor = 2 ** l
nb_row = init_nb_row // ds_factor
nb_col = init_nb_col // ds_factor
if u in ['r', 'c']:
stack_size = self.channels_r[l]
elif u == 'e':
stack_size = 2 * self.channels_a[l]
elif u == 'ahat':
stack_size = self.channels_a[l]
output_size = stack_size * nb_row * nb_col # flattened size
reducer = backend.zeros((input_shape[self.channel_axis], output_size)) # (nb_channels, output_size)
initial_state = backend.dot(base_initial_state, reducer) # (samples, output_size)
if self.data_format == 'channels_first':
output_shp = (-1, stack_size, nb_row, nb_col)
else:
output_shp = (-1, nb_row, nb_col, stack_size)
initial_state = backend.reshape(initial_state, output_shp)
initial_states += [initial_state]
return initial_states
def build(self, input_shape):
        # Recursively call build() on each convolutional sublayer of this model.
self.input_spec = [InputSpec(shape=input_shape)]
self.trainable_weights = []
nb_row, nb_col = (input_shape[-2], input_shape[-1]) if self.data_format == 'channels_first' else (input_shape[-3], input_shape[-2])
for c in sorted(self.conv_layers.keys()):
for l in range(len(self.conv_layers[c])):
ds_factor = 2 ** l
if c == 'ahat':
nb_channels = self.channels_r[l]
elif c == 'a':
nb_channels = 2 * self.channels_a[l]
else:
nb_channels = self.channels_a[l] * 2 + self.channels_r[l]
if l < self.layer_size - 1:
nb_channels += self.channels_r[l + 1]
in_shape = (input_shape[0], nb_channels, nb_row // ds_factor, nb_col // ds_factor)
if self.data_format == 'channels_last': in_shape = (in_shape[0], in_shape[2], in_shape[3], in_shape[1])
with backend.name_scope('layer_' + c + '_' + str(l)):
self.conv_layers[c][l].build(in_shape)
self.trainable_weights += self.conv_layers[c][l].trainable_weights
self.states = [None] * self.layer_size * 3
def step(self, a, states):
r_tm1 = states[:self.layer_size]
c_tm1 = states[self.layer_size:2 * self.layer_size]
e_tm1 = states[2 * self.layer_size:3 * self.layer_size]
c = []
r = []
e = []
for l in reversed(range(self.layer_size)):
inputs = [r_tm1[l], e_tm1[l]]
if l < self.layer_size - 1:
inputs.append(r_up)
inputs = backend.concatenate(inputs, axis=self.channel_axis)
i = self.conv_layers['i'][l].call(inputs)
f = self.conv_layers['f'][l].call(inputs)
o = self.conv_layers['o'][l].call(inputs)
_c = f * c_tm1[l] + i * self.conv_layers['c'][l].call(inputs)
_r = o * activations.get('tanh')(_c)
c.insert(0, _c)
r.insert(0, _r)
if l > 0:
r_up = self.upsample.call(_r)
for l in range(self.layer_size):
ahat = self.conv_layers['ahat'][l].call(r[l])
if l == 0:
ahat = backend.minimum(ahat,1.)
frame_prediction = ahat
# compute errors
e_up = activations.get('relu')(ahat - a)
e_down = activations.get('relu')(a - ahat)
e.append(backend.concatenate((e_up, e_down), axis=self.channel_axis))
output = ahat
if l < self.layer_size - 1:
a = self.conv_layers['a'][l].call(e[l])
a = self.pool.call(a) # target for next layer
states = r + c + e
if self.output_mode == 'prediction':
output = frame_prediction
else:
for l in range(self.layer_size):
layer_error = backend.mean(backend.batch_flatten(e[l]), axis=-1, keepdims=True)
all_error = layer_error if l == 0 else backend.concatenate((all_error, layer_error), axis=-1)
if self.output_mode == 'error':
output = all_error
return output, states
def get_config(self):
config = {'Channels in a': self.channels_a,
'Channels in r': self.channels_r,
'Global filter size': self.glob_filter_size,
'data_format' : self.data_format}
base_config = super(PredNetCustom, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
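# Hedged usage sketch (assumption, not part of the original file): wiring the layer
# into a Keras model. Shapes and channel counts below are illustrative only, and
# 'channels_first' data is assumed.
#   from keras.layers import Input
#   from keras.models import Model
#   prednet = PredNetCustom(channels_a=(3, 48), channels_r=(3, 48),
#                           glob_filter_size=3, output_mode='error',
#                           return_sequences=True)
#   inputs = Input(shape=(10, 3, 128, 160))   # (time, channels, rows, cols)
#   errors = prednet(inputs)                  # (batch, time, n_layers)
#   model = Model(inputs=inputs, outputs=errors)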
| 48.624242
| 159
| 0.585691
|
86018c7988b0566b7790984ae898b9c5c0e1507f
| 12,681
|
py
|
Python
|
electrum_ltc/synchronizer.py
|
BITRY/vialectrum
|
2ed2902ebc2af24b9c32d774fa4a32cbda60a9e5
|
[
"MIT"
] | 11
|
2016-01-17T04:14:58.000Z
|
2018-01-23T10:53:40.000Z
|
electrum_ltc/synchronizer.py
|
BITRY/vialectrum
|
2ed2902ebc2af24b9c32d774fa4a32cbda60a9e5
|
[
"MIT"
] | 17
|
2015-01-11T13:37:21.000Z
|
2018-05-16T10:10:09.000Z
|
electrum_ltc/synchronizer.py
|
BITRY/vialectrum
|
2ed2902ebc2af24b9c32d774fa4a32cbda60a9e5
|
[
"MIT"
] | 13
|
2016-09-29T13:41:09.000Z
|
2018-05-12T15:32:28.000Z
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import hashlib
from typing import Dict, List, TYPE_CHECKING, Tuple
from collections import defaultdict
import logging
from aiorpcx import TaskGroup, run_in_thread, RPCError
from . import util
from .transaction import Transaction, PartialTransaction
from .util import bh2u, make_aiohttp_session, NetworkJobOnDefaultServer, random_shuffled_copy
from .bitcoin import address_to_scripthash, is_address
from .network import UntrustedServerReturnedError
from .logging import Logger
from .interface import GracefulDisconnect
if TYPE_CHECKING:
from .network import Network
from .address_synchronizer import AddressSynchronizer
class SynchronizerFailure(Exception): pass
def history_status(h):
if not h:
return None
status = ''
for tx_hash, height in h:
status += tx_hash + ':%d:' % height
return bh2u(hashlib.sha256(status.encode('ascii')).digest())
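# Hedged sketch (assumption, not part of the original file): the text fed to sha256
# is the in-order concatenation of "txid:height:" pairs, e.g.
#   history_status([("ab" * 32, 5)])
#   == bh2u(hashlib.sha256(("ab" * 32 + ":5:").encode("ascii")).digest())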
class SynchronizerBase(NetworkJobOnDefaultServer):
"""Subscribe over the network to a set of addresses, and monitor their statuses.
Every time a status changes, run a coroutine provided by the subclass.
"""
def __init__(self, network: 'Network'):
self.asyncio_loop = network.asyncio_loop
self._reset_request_counters()
NetworkJobOnDefaultServer.__init__(self, network)
def _reset(self):
super()._reset()
self.requested_addrs = set()
self.scripthash_to_address = {}
self._processed_some_notifications = False # so that we don't miss them
self._reset_request_counters()
# Queues
self.add_queue = asyncio.Queue()
self.status_queue = asyncio.Queue()
async def _start_tasks(self):
try:
async with self.taskgroup as group:
await group.spawn(self.send_subscriptions())
await group.spawn(self.handle_status())
await group.spawn(self.main())
finally:
# we are being cancelled now
self.session.unsubscribe(self.status_queue)
def _reset_request_counters(self):
self._requests_sent = 0
self._requests_answered = 0
def add(self, addr):
asyncio.run_coroutine_threadsafe(self._add_address(addr), self.asyncio_loop)
async def _add_address(self, addr: str):
if not is_address(addr): raise ValueError(f"invalid bitcoin address {addr}")
if addr in self.requested_addrs: return
self.requested_addrs.add(addr)
await self.add_queue.put(addr)
async def _on_address_status(self, addr, status):
"""Handle the change of the status of an address."""
raise NotImplementedError() # implemented by subclasses
async def send_subscriptions(self):
async def subscribe_to_address(addr):
h = address_to_scripthash(addr)
self.scripthash_to_address[h] = addr
self._requests_sent += 1
try:
await self.session.subscribe('blockchain.scripthash.subscribe', [h], self.status_queue)
except RPCError as e:
if e.message == 'history too large': # no unique error code
raise GracefulDisconnect(e, log_level=logging.ERROR) from e
raise
self._requests_answered += 1
self.requested_addrs.remove(addr)
while True:
addr = await self.add_queue.get()
await self.taskgroup.spawn(subscribe_to_address, addr)
async def handle_status(self):
while True:
h, status = await self.status_queue.get()
addr = self.scripthash_to_address[h]
await self.taskgroup.spawn(self._on_address_status, addr, status)
self._processed_some_notifications = True
def num_requests_sent_and_answered(self) -> Tuple[int, int]:
return self._requests_sent, self._requests_answered
async def main(self):
raise NotImplementedError() # implemented by subclasses
class Synchronizer(SynchronizerBase):
'''The synchronizer keeps the wallet up-to-date with its set of
addresses and their transactions. It subscribes over the network
to wallet addresses, gets the wallet to generate new addresses
when necessary, requests the transaction history of any addresses
we don't have the full history of, and requests binary transaction
data of any transactions the wallet doesn't have.
'''
def __init__(self, wallet: 'AddressSynchronizer'):
self.wallet = wallet
SynchronizerBase.__init__(self, wallet.network)
def _reset(self):
super()._reset()
self.requested_tx = {}
self.requested_histories = set()
def diagnostic_name(self):
return self.wallet.diagnostic_name()
def is_up_to_date(self):
return (not self.requested_addrs
and not self.requested_histories
and not self.requested_tx)
async def _on_address_status(self, addr, status):
history = self.wallet.db.get_addr_history(addr)
if history_status(history) == status:
return
if (addr, status) in self.requested_histories:
return
# request address history
self.requested_histories.add((addr, status))
h = address_to_scripthash(addr)
self._requests_sent += 1
result = await self.network.get_history_for_scripthash(h)
self._requests_answered += 1
self.logger.info(f"receiving history {addr} {len(result)}")
hashes = set(map(lambda item: item['tx_hash'], result))
hist = list(map(lambda item: (item['tx_hash'], item['height']), result))
# tx_fees
for item in result:
if item['height'] in (-1, 0) and 'fee' not in item:
raise Exception("server response to get_history contains unconfirmed tx without fee")
tx_fees = [(item['tx_hash'], item.get('fee')) for item in result]
tx_fees = dict(filter(lambda x:x[1] is not None, tx_fees))
# Check that txids are unique
if len(hashes) != len(result):
self.logger.info(f"error: server history has non-unique txids: {addr}")
# Check that the status corresponds to what was announced
elif history_status(hist) != status:
self.logger.info(f"error: status mismatch: {addr}")
else:
# Store received history
self.wallet.receive_history_callback(addr, hist, tx_fees)
# Request transactions we don't have
await self._request_missing_txs(hist)
# Remove request; this allows up_to_date to be True
self.requested_histories.discard((addr, status))
async def _request_missing_txs(self, hist, *, allow_server_not_finding_tx=False):
# "hist" is a list of [tx_hash, tx_height] lists
transaction_hashes = []
for tx_hash, tx_height in hist:
if tx_hash in self.requested_tx:
continue
tx = self.wallet.db.get_transaction(tx_hash)
if tx and not isinstance(tx, PartialTransaction):
continue # already have complete tx
transaction_hashes.append(tx_hash)
self.requested_tx[tx_hash] = tx_height
if not transaction_hashes: return
async with TaskGroup() as group:
for tx_hash in transaction_hashes:
await group.spawn(self._get_transaction(tx_hash, allow_server_not_finding_tx=allow_server_not_finding_tx))
async def _get_transaction(self, tx_hash, *, allow_server_not_finding_tx=False):
self._requests_sent += 1
try:
raw_tx = await self.network.get_transaction(tx_hash)
except UntrustedServerReturnedError as e:
# most likely, "No such mempool or blockchain transaction"
if allow_server_not_finding_tx:
self.requested_tx.pop(tx_hash)
return
else:
raise
finally:
self._requests_answered += 1
tx = Transaction(raw_tx)
if tx_hash != tx.txid():
raise SynchronizerFailure(f"received tx does not match expected txid ({tx_hash} != {tx.txid()})")
tx_height = self.requested_tx.pop(tx_hash)
self.wallet.receive_tx_callback(tx_hash, tx, tx_height)
self.logger.info(f"received tx {tx_hash} height: {tx_height} bytes: {len(raw_tx)}")
# callbacks
util.trigger_callback('new_transaction', self.wallet, tx)
async def main(self):
self.wallet.set_up_to_date(False)
# request missing txns, if any
for addr in self.wallet.db.get_history():
history = self.wallet.db.get_addr_history(addr)
# Old electrum servers returned ['*'] when all history for the address
# was pruned. This no longer happens but may remain in old wallets.
if history == ['*']: continue
await self._request_missing_txs(history, allow_server_not_finding_tx=True)
# add addresses to bootstrap
for addr in random_shuffled_copy(self.wallet.get_addresses()):
await self._add_address(addr)
# main loop
while True:
await asyncio.sleep(0.1)
await run_in_thread(self.wallet.synchronize)
up_to_date = self.is_up_to_date()
if (up_to_date != self.wallet.is_up_to_date()
or up_to_date and self._processed_some_notifications):
self._processed_some_notifications = False
if up_to_date:
self._reset_request_counters()
self.wallet.set_up_to_date(up_to_date)
util.trigger_callback('wallet_updated', self.wallet)
class Notifier(SynchronizerBase):
"""Watch addresses. Every time the status of an address changes,
an HTTP POST is sent to the corresponding URL.
"""
def __init__(self, network):
SynchronizerBase.__init__(self, network)
self.watched_addresses = defaultdict(list) # type: Dict[str, List[str]]
self._start_watching_queue = asyncio.Queue() # type: asyncio.Queue[Tuple[str, str]]
async def main(self):
# resend existing subscriptions if we were restarted
for addr in self.watched_addresses:
await self._add_address(addr)
# main loop
while True:
addr, url = await self._start_watching_queue.get()
self.watched_addresses[addr].append(url)
await self._add_address(addr)
async def start_watching_addr(self, addr: str, url: str):
await self._start_watching_queue.put((addr, url))
async def stop_watching_addr(self, addr: str):
self.watched_addresses.pop(addr, None)
# TODO blockchain.scripthash.unsubscribe
async def _on_address_status(self, addr, status):
if addr not in self.watched_addresses:
return
self.logger.info(f'new status for addr {addr}')
headers = {'content-type': 'application/json'}
data = {'address': addr, 'status': status}
for url in self.watched_addresses[addr]:
try:
async with make_aiohttp_session(proxy=self.network.proxy, headers=headers) as session:
async with session.post(url, json=data, headers=headers) as resp:
await resp.text()
except Exception as e:
self.logger.info(repr(e))
else:
self.logger.info(f'Got Response for {addr}')
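# Hedged usage sketch (assumption, not part of the original file): registering a
# webhook from an asyncio context. The URL is a placeholder; each subsequent status
# change POSTs a JSON body of the form {"address": addr, "status": status} to it.
#   notifier = Notifier(network)
#   await notifier.start_watching_addr(addr, "http://localhost:8080/callback")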
| 41.851485
| 122
| 0.662014
|
ceb141dc000b61f76d9cd30af8be8234e3c5bbd0
| 1,125
|
py
|
Python
|
sierpinski.py
|
sytong/turtle-graphics-fun
|
3e5eb609c9ec93d41cc315ff5561e5f468959be3
|
[
"MIT"
] | null | null | null |
sierpinski.py
|
sytong/turtle-graphics-fun
|
3e5eb609c9ec93d41cc315ff5561e5f468959be3
|
[
"MIT"
] | null | null | null |
sierpinski.py
|
sytong/turtle-graphics-fun
|
3e5eb609c9ec93d41cc315ff5561e5f468959be3
|
[
"MIT"
] | null | null | null |
import turtle
import math
def teleport(koopa, pos):
koopa.hideturtle()
koopa.up()
koopa.setpos(pos)
koopa.down()
koopa.showturtle()
def draw_triangle(koopa, pos, length, angle, color):
teleport(koopa, pos)
koopa.setheading(0)
koopa.right(30)
    koopa.fillcolor(color)
    koopa.begin_fill()
    for i in range(3):
        koopa.forward(length)
        koopa.right(angle)
    koopa.end_fill()
def sierpinski(koopa, pos, length, level):
draw_triangle(koopa, pos, length, -120, "white")
if level > 0:
sierpinski(koopa, (pos[0], pos[1] + length * math.sin(math.radians(60))), length/2, level-1)
sierpinski(koopa, (pos[0]-length/2, pos[1]), length/2, level-1)
sierpinski(koopa, (pos[0]+length/2, pos[1]), length/2, level-1)
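# Illustrative note (assumption, not part of the original script): each call draws
# one white triangle and recurses three times, so T(level) = 1 + 3*T(level-1) =
# (3**(level + 1) - 1) // 2 triangles; the level-3 call in main() draws 40.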
def main():
window = turtle.Screen()
window.bgcolor("black")
turtle.mode("logo")
koopa = turtle.Turtle()
koopa.shape("classic")
koopa.color("#028482","#028482")
koopa.speed(1000)
# The board/background
draw_triangle(koopa, (-300,-300), 600, 120, "#028482")
# The actual Sierpinski Triangle
sierpinski(koopa, (0, -300), 300, 3)
window.exitonclick()
main()
| 22.5
| 96
| 0.667556
|
fa0ee2977c1bb71a451897f957e916a09076aa2e
| 822
|
py
|
Python
|
docs/multiselect/simple.py
|
snehilvj/dmc-docs
|
f8e564dd93f005c8c2cdf84ad20f41c668041080
|
[
"MIT"
] | 6
|
2022-01-28T17:12:58.000Z
|
2022-03-16T01:29:18.000Z
|
docs/multiselect/simple.py
|
snehilvj/dmc-demo
|
3ac16f017922f8cdb322c91c29ad3144fd0bb886
|
[
"MIT"
] | 1
|
2022-01-07T21:21:07.000Z
|
2022-01-22T12:07:28.000Z
|
docs/multiselect/simple.py
|
snehilvj/dmc-demo
|
3ac16f017922f8cdb322c91c29ad3144fd0bb886
|
[
"MIT"
] | 1
|
2022-02-05T18:00:36.000Z
|
2022-02-05T18:00:36.000Z
|
import dash_mantine_components as dmc
from dash import Output, Input, html, callback
component = html.Div(
[
dmc.MultiSelect(
label="Select frameworks",
placeholder="Select all you like!",
id="framework-multi-select",
value=["ng", "vue"],
data=[
{"value": "react", "label": "React"},
{"value": "ng", "label": "Angular"},
{"value": "svelte", "label": "Svelte"},
{"value": "vue", "label": "Vue"},
],
style={"width": 400, "marginBottom": 10},
),
dmc.Text(id="multi-selected-value"),
]
)
@callback(
Output("multi-selected-value", "children"), Input("framework-multi-select", "value")
)
def select_value(value):
return ", ".join(value)
| 28.344828
| 88
| 0.512165
|
7da2e9f192f21de6c4ed6b84c021299f87dce4cf
| 1,826
|
py
|
Python
|
classifier/svm_standard/inout.py
|
ecohealthalliance/eha_grit
|
cb95b759222ca7a416dd7d439571e7b610dd5e23
|
[
"Apache-2.0"
] | null | null | null |
classifier/svm_standard/inout.py
|
ecohealthalliance/eha_grit
|
cb95b759222ca7a416dd7d439571e7b610dd5e23
|
[
"Apache-2.0"
] | null | null | null |
classifier/svm_standard/inout.py
|
ecohealthalliance/eha_grit
|
cb95b759222ca7a416dd7d439571e7b610dd5e23
|
[
"Apache-2.0"
] | null | null | null |
import csv
import networkx as nx
from networkx.readwrite import json_graph
Y = 100
MINOR = 25
def read_table (path):
nodes = []
#buffer = open (path, 'r').read ()
#buffer = buffer.replace ('\r', '')
#rows = buffer.split ('\n')
rows = csv.reader (open (path, 'rU'))
contrib = []
for elem in rows.next ():
if len (elem) > 0:
contrib.append (True)
else:
contrib.append (False)
keys = rows.next ()
id = 0
for row in iter (rows):
#if len (row) == 0:
# continue
pos = []
attr = {}
for key, value, include in zip (keys, row, contrib):
#print ' '.join ([key, value, str (include)])
if include:
if value == 'y':
value = Y
elif value == 'minor':
value = MINOR
elif len (value) == 0:
value = 0
pos.append (float (value))
attr[key] = value
item = {
'_id': id,
'pos': pos,
'attr': attr
}
nodes.append (item)
id += 1
return nodes
def equal_weights (nodes):
weights = []
for i in range (0, len (nodes[0]['pos'])):
weights.append (1.0)
return weights
def make_graph (nodes, weights):
G = nx.Graph ()
for node in nodes:
G.add_node (node['_id'], node)
for i, first in enumerate (nodes):
for j, second in enumerate (nodes):
if j >= i:
continue
total = 0.0
for w, f, s in zip (weights, first['pos'], second['pos']):
total += w * ((f - s) / 100.0)
if total > 0.0:
G.add_edge (first['_id'], second['_id'], {'weight': total})
return G
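# Hedged usage sketch (assumption, not part of the original module): building the
# similarity graph from a CSV with equal feature weights. The file name below is a
# placeholder.
#   nodes = read_table('features.csv')
#   G = make_graph(nodes, equal_weights(nodes))
#   print(G.number_of_nodes(), G.number_of_edges())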
| 26.085714
| 75
| 0.456188
|
9c0858df2e6aa25f28e1c9c98a724f8d4d807357
| 364
|
py
|
Python
|
config.dist.py
|
Ths2-9Y-LqJt6/Cattrotar
|
bc9acd8ab75563d746c5c1ff9a30f01b21019f9f
|
[
"MIT"
] | 1
|
2022-01-28T17:26:04.000Z
|
2022-01-28T17:26:04.000Z
|
config.dist.py
|
mrjones-plip/Cattrotar
|
bc9acd8ab75563d746c5c1ff9a30f01b21019f9f
|
[
"MIT"
] | 3
|
2020-02-28T21:58:23.000Z
|
2020-03-03T23:15:27.000Z
|
config.dist.py
|
mrjones-plip/Cattrotar
|
bc9acd8ab75563d746c5c1ff9a30f01b21019f9f
|
[
"MIT"
] | null | null | null |
# which chromecasts to use
chromecasts = ("This Room", "Another Room")
# use external display or not
use_display = False
# which GPIO pins your rotary encoder is using
clk = 17
dt = 18
sw = 23
# how big for the font to be on the screen
font_size = 55
# set to 'raspberry' for raspberry pi or 'orange' for orange pi zero
board_type = 'raspberry'
debug = True
| 18.2
| 67
| 0.717033
|
2025a0d565211a049d60d5d62a1e721c320c06f8
| 5,606
|
py
|
Python
|
tests/output/kml.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
tests/output/kml.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
tests/output/kml.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the KML output module."""
import io
import os
import sys
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.lib import definitions
from plaso.output import kml
from tests.containers import test_lib as containers_test_lib
from tests.output import test_lib
class KMLOutputTest(test_lib.OutputModuleTestCase):
"""Tests for the KML output module."""
# pylint: disable=protected-access
_OS_PATH_SPEC = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location='{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd')))
_TEST_EVENTS = [
{'data_type': 'test:output',
'hostname': 'ubuntu',
'path_spec': path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/var/log/syslog.1', parent=_OS_PATH_SPEC),
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root'},
{'data_type': 'test:output',
'hostname': 'ubuntu',
'latitude': 37.4222899014,
'longitude': -122.082203543,
'path_spec': path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/var/log/syslog.1', parent=_OS_PATH_SPEC),
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root'}]
def testWriteHeader(self):
"""Tests the WriteHeader function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
output_module = kml.KMLOutputModule(output_mediator)
output_module._file_object = test_file_object
output_module.WriteHeader()
expected_header = (
'<?xml version="1.0" encoding="utf-8"?>'
'<kml xmlns="http://www.opengis.net/kml/2.2"><Document>')
header = test_file_object.getvalue()
self.assertEqual(header, expected_header)
def testWriteFooter(self):
"""Tests the WriteFooter function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
output_module = kml.KMLOutputModule(output_mediator)
output_module._file_object = test_file_object
output_module.WriteFooter()
footer = test_file_object.getvalue()
self.assertEqual(footer, '</Document></kml>')
def testWriteEventBody(self):
"""Tests the WriteEventBody function."""
# Test event without geo-location.
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
output_module = kml.KMLOutputModule(output_mediator)
output_module._file_object = test_file_object
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
output_module.WriteEventBody(event, event_data, event_data_stream, None)
event_body = test_file_object.getvalue()
self.assertEqual(event_body, '')
# Test event with geo-location.
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
output_module = kml.KMLOutputModule(output_mediator)
output_module._file_object = test_file_object
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[1]))
output_module.WriteEventBody(event, event_data, event_data_stream, None)
event_body = test_file_object.getvalue()
event_identifier = event.GetIdentifier()
event_identifier_string = event_identifier.CopyToString()
if sys.platform.startswith('win'):
# The dict comparison is very picky on Windows hence we
# have to make sure the drive letter is in the same case.
expected_os_location = os.path.abspath('\\{0:s}'.format(
os.path.join('cases', 'image.dd')))
else:
expected_os_location = '{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd'))
expected_event_body = (
'<Placemark><name>{0:s}</name><description>'
'+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
'+-+-+-+-+-+-\n'
'[Timestamp]:\n'
' 2012-06-27T18:17:01.000000Z\n'
'\n'
'[Pathspec]:\n'
' type: OS, location: {1:s}\n'
' type: TSK, inode: 15, location: /var/log/syslog.1\n'
'\n'
'[Reserved attributes]:\n'
' {{data_type}} test:output\n'
' {{display_name}} TSK:/var/log/syslog.1\n'
' {{filename}} /var/log/syslog.1\n'
' {{hostname}} ubuntu\n'
' {{inode}} 15\n'
' {{username}} root\n'
'\n'
'[Additional attributes]:\n'
' {{latitude}} 37.4222899014\n'
' {{longitude}} -122.082203543\n'
' {{text}} Reporter <CRON> PID: |8442| '
'(pam_unix(cron:session): session\n'
' closed for user root)\n'
'\n'
'</description>'
'<Point><coordinates>-122.082203543,37.4222899014</coordinates>'
'</Point></Placemark>').format(
event_identifier_string, expected_os_location)
self.assertEqual(event_body.split('\n'), expected_event_body.split('\n'))
if __name__ == '__main__':
unittest.main()
| 34.604938
| 78
| 0.648948
|
190dbb45fd15ff38fb94d45305b36b87c3bae174
| 1,854
|
py
|
Python
|
setup.py
|
sommersoft/Adafruit_CircuitPython_TinyLoRa
|
4a0cb9deb7590c35e32f6353dce0a3b08eb1c47a
|
[
"MIT"
] | null | null | null |
setup.py
|
sommersoft/Adafruit_CircuitPython_TinyLoRa
|
4a0cb9deb7590c35e32f6353dce0a3b08eb1c47a
|
[
"MIT"
] | null | null | null |
setup.py
|
sommersoft/Adafruit_CircuitPython_TinyLoRa
|
4a0cb9deb7590c35e32f6353dce0a3b08eb1c47a
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="adafruit-circuitpython-tinylora",
use_scm_version=True,
setup_requires=["setuptools_scm"],
description="CircuitPython library for LoRaWAN and The Things Network.",
long_description=long_description,
long_description_content_type="text/x-rst",
# The project's main homepage.
url="https://github.com/adafruit/Adafruit_CircuitPython_TinyLoRa",
# Author details
author="Adafruit Industries",
author_email="circuitpython@adafruit.com",
install_requires=["Adafruit-Blinka", "adafruit-circuitpython-busdevice"],
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Hardware",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
# What does your project relate to?
keywords="adafruit lorawan thethingsnetwork hardware micropython circuitpython",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=["adafruit_tinylora"],
)
| 34.981132
| 84
| 0.705502
|
ae2bf36be96e77526e9c6a745f2a1fab1af83e41
| 1,092
|
py
|
Python
|
read_bases.py
|
mcgyver5/pftools
|
49b58127375e5795aef2efca5fc65e6b826b7b97
|
[
"MIT"
] | 1
|
2020-01-27T18:28:00.000Z
|
2020-01-27T18:28:00.000Z
|
read_bases.py
|
mcgyver5/pftools
|
49b58127375e5795aef2efca5fc65e6b826b7b97
|
[
"MIT"
] | 1
|
2020-02-10T00:07:30.000Z
|
2020-02-10T00:07:30.000Z
|
read_bases.py
|
mcgyver5/pftools
|
49b58127375e5795aef2efca5fc65e6b826b7b97
|
[
"MIT"
] | 1
|
2020-02-03T19:04:10.000Z
|
2020-02-03T19:04:10.000Z
|
import sys
import binascii
if len(sys.argv) > 2:
answer = ""
cat = sys.argv[1]
if cat == "b":
for x in range(2,len(sys.argv)):
s = sys.argv[x]
i = int(s,2)
            answer = answer + binascii.unhexlify('%02x' % i).decode('ascii')
if cat == "dec":
for x in range(2, len(sys.argv)):
s = sys.argv[x]
for y in range(0, len(s),2):
i = int(s[y:y+2])
answer = answer + chr(i)
if cat == "oct":
for h in range(2,len(sys.argv)):
s = sys.argv[h]
for y2 in range(0, len(s),3):
i = int(s[y2:y2+3],8)
answer = answer + chr(i)
if cat == "hex":
for h in range(2,len(sys.argv)):
s = sys.argv[h]
for y2 in range(0, len(s),2):
i = int(s[y2:y2+2],16)
answer = answer + chr(i)
print(answer)
else:
    print("Need at least two arguments. First argument is type: b=binary, dec=decimal, oct=octal, hex=hex.")
    print("example: $ python read_bases.py b 01100110 ")
sys.exit(1)
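# Illustrative invocations (assumptions, not part of the original script):
#   $ python read_bases.py hex 484921            -> HI!
#   $ python read_bases.py dec 727333            -> HI!
#   $ python read_bases.py b 01001000 01001001   -> HI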
| 29.513514
| 100
| 0.466117
|
aa36ca40ac628d2ab3aaed44bee41c39fbfc0f79
| 753
|
py
|
Python
|
tests/modules/span_extractors/test_concat_span_extractor.py
|
altescy/xallennlp
|
9c10ec8832d551e160f6cf63345dda206395a9dd
|
[
"MIT"
] | 7
|
2020-06-21T02:33:16.000Z
|
2022-01-26T10:45:11.000Z
|
tests/modules/span_extractors/test_concat_span_extractor.py
|
altescy/xallennlp
|
9c10ec8832d551e160f6cf63345dda206395a9dd
|
[
"MIT"
] | 7
|
2021-07-11T09:00:16.000Z
|
2022-01-17T06:53:50.000Z
|
tests/modules/span_extractors/test_concat_span_extractor.py
|
altescy/xallennlp
|
9c10ec8832d551e160f6cf63345dda206395a9dd
|
[
"MIT"
] | 3
|
2020-07-23T09:41:30.000Z
|
2021-06-10T03:55:10.000Z
|
import torch
from allennlp.modules.span_extractors import EndpointSpanExtractor, SelfAttentiveSpanExtractor
from xallennlp.modules.span_extractors import ConcatSpanExtractor
def test_concat_span_extractor() -> None:
inputs = torch.rand((2, 4, 5))
spans = torch.LongTensor([[[0, 2], [1, 1]], [[1, 2], [2, 3]]])
extractor = ConcatSpanExtractor(
span_extractors=[
EndpointSpanExtractor(input_dim=5, combination="x,y"),
SelfAttentiveSpanExtractor(input_dim=5),
],
num_width_embeddings=4,
span_width_embedding_dim=3,
)
assert extractor.get_input_dim() == 5
assert extractor.get_output_dim() == 18
output = extractor(inputs, spans)
assert output.size() == (2, 2, 18)
| 32.73913
| 94
| 0.679947
|
3b9c7f279c86077ba2443274ad1f330df7e97c24
| 18,989
|
py
|
Python
|
learn_to_infer/gmm_models.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
learn_to_infer/gmm_models.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
learn_to_infer/gmm_models.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer models for performing inference in a GMM.
"""
from functools import partial
from . import transformer
from . import util
import flax
import jax
from jax import vmap
import jax.numpy as jnp
import jax.random
import jax.scipy as jscipy
class MeanInferenceMachine(object):
"""Model which predicts cluster means from a batch of data."""
def __init__(self,
data_dim=2,
max_k=2,
max_num_data_points=25,
num_heads=8,
num_encoders=6,
num_decoders=6,
qkv_dim=512,
activation_fn=flax.deprecated.nn.relu,
weight_init=jax.nn.initializers.xavier_uniform()):
"""Creates the model.
Args:
data_dim: The dimensionality of the data points to be fed in.
max_k: The maximum number of clusters that could occur in the data.
max_num_data_points: The maximum number of data points that could be
fed in at one time.
num_heads: The number of heads to use in the transformer.
num_encoders: The number of encoder layers to use in the transformer.
num_decoders: The number of decoder layers to use in the transformer.
qkv_dim: The dimensions of the queries, keys, and values in the
transformer.
activation_fn: The activation function to use for hidden layers.
weight_init: The weight initializer.
"""
self.data_dim = data_dim
self.max_k = max_k
self.max_num_data_points = max_num_data_points
self.tfmr = transformer.EncoderDecoderTransformer.partial(
target_dim=data_dim,
max_input_length=max_num_data_points, max_target_length=max_k,
num_heads=num_heads, num_encoders=num_encoders,
num_decoders=num_decoders, qkv_dim=qkv_dim,
activation_fn=activation_fn, weight_init=weight_init)
def init_params(self, key):
"""Initializes the parameters of the model using dummy data.
Args:
key: A JAX PRNG key
Returns:
params: The parameters of the model.
"""
batch_size = 1
key, subkey = jax.random.split(key)
inputs = jax.random.normal(
subkey, [batch_size, self.max_num_data_points, self.data_dim])
input_lengths = jnp.full([batch_size], self.max_num_data_points)
ks = jnp.full([batch_size], self.max_k)
_, params = self.tfmr.init(key, inputs, input_lengths, ks)
return params
def loss(self, params, inputs, input_lengths, true_params, ks, key):
"""Computes the wasserstein loss for this model.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers representing the number of
data points in each batch element.
true_params: A three-tuple containing
        true_means: A [batch_size, max_k, data_dim] tensor containing the true means of
the cluster components for each batch element.
true_scales: Unused.
true_weights: Unused.
ks: A [batch_size] set of integers representing the true number of
clusters in each batch element.
key: A JAX PRNG key.
Returns:
The wasserstein distance from the set of predicted mus to the true set
of mus, a tensor of shape [batch_size].
"""
true_means, _, _ = true_params
return self.tfmr.wasserstein_distance_loss(
params, inputs, input_lengths, true_means, ks, key)
def predict(self, params, inputs, input_lengths, ks):
"""Predicts the cluster means for the given data sets.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers, the number of data points
in each batch element.
ks: A [batch_size] set of integers, the number of clusters in each batch
element.
Returns:
The predicted means, a tensor of shape [batch_size, max_k, data_dim].
"""
return self.tfmr.call(params, inputs, input_lengths, ks)
def classify(self, params, inputs, input_lengths, ks):
"""Assigns each point to cluster based on the predicted cluster means.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers, the number of data points
in each batch element.
ks: A [batch_size] set of integers, the number of clusters in each batch
element.
Returns:
The predicted clusters, an integer tensor of shape
[batch_size, max_num_data_points]. Each element is in [0, max_num_k).
"""
predicted_means = self.predict(params, inputs, input_lengths, ks)
# [batch_size, max_input_length, max_k]
dists = util.pair_dists(inputs, predicted_means)
dists = jnp.where(
util.make_mask(ks, self.max_k)[:, jnp.newaxis, :], dists,
jnp.full_like(dists, jnp.inf))
return jnp.argmin(dists, axis=-1), predicted_means
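# Hedged usage sketch (assumption, not part of the original file): initializing the
# mean-only inference machine and predicting cluster means. Shapes follow the
# docstrings above; the data below is a placeholder.
#   key = jax.random.PRNGKey(0)
#   model = MeanInferenceMachine(data_dim=2, max_k=3, max_num_data_points=25)
#   params = model.init_params(key)
#   mus = model.predict(params, inputs, input_lengths, ks)  # [batch, max_k, 2]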
def flatten_scale(scale):
dim = scale.shape[-1]
log_diag = jnp.log(jnp.diag(scale))
scale = scale.at[jnp.diag_indices(dim)].set(log_diag)
return scale[jnp.tril_indices(dim)]
def unflatten_scale(flat_scale, original_dim):
out = jnp.zeros([original_dim, original_dim], dtype=flat_scale.dtype)
out = out.at[jnp.tril_indices(original_dim)].set(flat_scale)
exp_diag = jnp.exp(jnp.diag(out))
return out.at[jnp.diag_indices(original_dim)].set(exp_diag)
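# Hedged sketch (assumption, not part of the original file): flatten_scale and
# unflatten_scale are inverses on lower-triangular scales with positive diagonals:
#   scale = jnp.array([[2.0, 0.0], [0.5, 1.5]])
#   unflatten_scale(flatten_scale(scale), 2)  # recovers `scale` up to float error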
class MeanScaleInferenceMachine(object):
def __init__(self,
data_dim=2,
max_k=2,
max_num_data_points=25,
num_heads=8,
num_encoders=6,
num_decoders=6,
qkv_dim=512,
activation_fn=flax.deprecated.nn.relu,
weight_init=jax.nn.initializers.xavier_uniform()):
"""Creates the model.
Args:
data_dim: The dimensionality of the data points to be fed in.
max_k: The maximum number of clusters that could occur in the data.
max_num_data_points: The maximum number of data points that could be
fed in at one time.
num_heads: The number of heads to use in the transformer.
num_encoders: The number of encoder layers to use in the transformer.
num_decoders: The number of decoder layers to use in the transformer.
qkv_dim: The dimensions of the queries, keys, and values in the
transformer.
activation_fn: The activation function to use for hidden layers.
weight_init: The weight initializer.
"""
self.data_dim = data_dim
self.max_k = max_k
self.max_num_data_points = max_num_data_points
target_dim = data_dim + int((data_dim*(data_dim+1))/2)
self.tfmr = transformer.EncoderDecoderTransformer.partial(
target_dim=target_dim,
max_input_length=max_num_data_points, max_target_length=max_k,
num_heads=num_heads, num_encoders=num_encoders,
num_decoders=num_decoders, qkv_dim=qkv_dim,
activation_fn=activation_fn, weight_init=weight_init)
def init_params(self, key):
"""Initializes the parameters of the model using dummy data.
Args:
key: A JAX PRNG key
Returns:
params: The parameters of the model.
"""
batch_size = 1
key, subkey = jax.random.split(key)
inputs = jax.random.normal(
subkey, [batch_size, self.max_num_data_points, self.data_dim])
input_lengths = jnp.full([batch_size], self.max_num_data_points)
ks = jnp.full([batch_size], self.max_k)
_, params = self.tfmr.init(key, inputs, input_lengths, ks)
return params
def loss(self, params, inputs, input_lengths, true_params, ks, key):
"""Computes the wasserstein loss for this model.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers representing the number of
data points in each batch element.
true_params: A three-tuple containing
true_means: A [batch_size, max_k, data_dim] tensor containing the true
means of the cluster components for each batch element.
true_scales: A [batch_size, max_k, data_dim, data_dim] tensor containing
the true scales of the cluster components for each batch element.
Should be the lower-triangular square root of a PSD matrix.
true_log_weights: Unused.
ks: A [batch_size] set of integers representing the true number of
clusters in each batch element.
key: A JAX PRNG key.
Returns:
The wasserstein distance from the set of predicted mus to the true set
of mus, a tensor of shape [batch_size].
"""
true_means, true_scales, _ = true_params
flat_scales = vmap(vmap(flatten_scale))(true_scales)
targets = jnp.concatenate([true_means, flat_scales], axis=-1)
return self.tfmr.wasserstein_distance_loss(
params, inputs, input_lengths, targets, ks, key)
def predict(self, params, inputs, input_lengths, ks):
"""Predicts the cluster means for the given data sets.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers, the number of data points
in each batch element.
ks: A [batch_size] set of integers, the number of clusters in each batch
element.
Returns:
params: A tuple containing
The predicted means, a tensor of shape [batch_size, max_k, data_dim].
The predicted scales, a tensor of shape
[batch_size, max_k, data_dim, data_dim].
"""
raw_outs = self.tfmr.call(params, inputs, input_lengths, ks)
mus = raw_outs[:, :, :self.data_dim]
us = vmap(vmap(unflatten_scale, in_axes=(0, None)), in_axes=(0, None))
scales = us(raw_outs[:, :, self.data_dim:], self.data_dim)
return mus, scales
def classify(self, params, inputs, input_lengths, ks):
"""Assigns each point to cluster based on the predicted cluster parameters.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers, the number of data points
in each batch element.
ks: A [batch_size] set of integers, the number of clusters in each batch
element.
Returns:
clusters: The predicted clusters, an integer tensor of shape
[batch_size, max_num_data_points]. Each element is in [0, max_num_k).
params: The predicted cluster parameters (means and covariances).
"""
means, scales = self.predict(params, inputs, input_lengths, ks)
covs = jnp.einsum("...ik,...jk->...ij", scales, scales)
log_ps = vmap(
vmap(
vmap(
jscipy.stats.multivariate_normal.logpdf,
in_axes=(0, None, None)),
in_axes=(None, 0, 0)))(inputs, means, covs)
log_ps = jnp.where(
util.make_mask(ks, self.max_k)[:, :, jnp.newaxis], log_ps,
jnp.full_like(log_ps, -jnp.inf))
clusters = jnp.argmax(log_ps, axis=-2)
return clusters, (means, covs)
class MeanScaleWeightInferenceMachine(object):
def __init__(self,
data_dim=2,
max_k=2,
max_num_data_points=25,
num_heads=8,
num_encoders=6,
num_decoders=6,
qkv_dim=512,
activation_fn=flax.deprecated.nn.relu,
weight_init=jax.nn.initializers.xavier_uniform()):
"""Creates the model.
Args:
data_dim: The dimensionality of the data points to be fed in.
max_k: The maximum number of clusters that could occur in the data.
max_num_data_points: The maximum number of data points that could be
fed in at one time.
num_heads: The number of heads to use in the transformer.
num_encoders: The number of encoder layers to use in the transformer.
num_decoders: The number of decoder layers to use in the transformer.
qkv_dim: The dimensions of the queries, keys, and values in the
transformer.
activation_fn: The activation function to use for hidden layers.
weight_init: The weight initializer.
"""
self.max_num_data_points = max_num_data_points
self.data_dim = data_dim
self.max_k = max_k
target_dim = 1 + data_dim + int((data_dim*(data_dim+1))/2)
self.tfmr = transformer.EncoderDecoderTransformer.partial(
target_dim=target_dim,
max_input_length=max_num_data_points, max_target_length=max_k,
num_heads=num_heads, num_encoders=num_encoders,
num_decoders=num_decoders, qkv_dim=qkv_dim,
activation_fn=activation_fn, weight_init=weight_init)
def init_params(self, key):
"""Initializes the parameters of the model using dummy data.
Args:
key: A JAX PRNG key
Returns:
params: The parameters of the model.
"""
key, subkey = jax.random.split(key)
batch_size = 1
inputs = jax.random.normal(
subkey, [batch_size, self.max_num_data_points, self.data_dim])
input_lengths = jnp.full([batch_size], self.max_num_data_points)
ks = jnp.full([batch_size], self.max_k)
_, params = self.tfmr.init(key, inputs, input_lengths, ks)
return params
def loss(self, params, inputs, input_lengths, true_params, ks, key):
"""Computes the wasserstein loss for this model.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers representing the number of
data points in each batch element.
true_params: A three-tuple containing
true_means: A [batch_size, max_k, data_dim] tensor containing the true
means of the cluster components for each batch element.
true_scales: A [batch_size, max_k, data_dim, data_dim] tensor containing
the true scales of the cluster components for each batch element.
Should be the lower-triangular square root of a PSD matrix.
true_log_weights: A [batch_size, max_k] tensor containing the true
log weights of the cluster components for each batch element.
ks: A [batch_size] set of integers representing the true number of
clusters in each batch element.
key: A JAX PRNG key.
Returns:
The wasserstein distance from the set of predicted mus to the true set
of mus, a tensor of shape [batch_size].
"""
true_means, true_scales, true_log_weights = true_params
flat_scales = vmap(vmap(flatten_scale))(true_scales)
targets = jnp.concatenate(
[true_log_weights[:, :, jnp.newaxis], true_means, flat_scales], axis=-1)
return self.tfmr.wasserstein_distance_loss(params, inputs, input_lengths,
targets, ks, key)
def predict(self, params, inputs, input_lengths, ks):
"""Predicts the cluster means for the given data sets.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers, the number of data points
in each batch element.
ks: A [batch_size] set of integers, the number of clusters in each batch
element.
Returns:
params: A tuple containing
The predicted means, a tensor of shape [batch_size, max_k, data_dim].
The predicted scales, a tensor of shape
[batch_size, max_k, data_dim, data_dim].
The predicted log weights, a tensor of shape [batch_size, max_k].
"""
raw_outs = self.tfmr.call(params, inputs, input_lengths, ks)
log_weights = raw_outs[:, :, 0]
mus = raw_outs[:, :, 1:self.data_dim + 1]
us = vmap(vmap(unflatten_scale, in_axes=(0, None)), in_axes=(0, None))
scales = us(raw_outs[:, :, self.data_dim + 1:], self.data_dim)
return mus, scales, log_weights
def classify(self, params, inputs, input_lengths, ks):
"""Assigns each point to cluster based on the predicted cluster parameters.
Args:
params: The parameters of the model, returned from init().
inputs: A [batch_size, max_num_data_points, data_dim] set of input data.
input_lengths: A [batch_size] set of integers, the number of data points
in each batch element.
ks: A [batch_size] set of integers, the number of clusters in each batch
element.
Returns:
clusters: The predicted clusters, an integer tensor of shape
[batch_size, max_num_data_points]. Each element is in [0, max_num_k).
params: The predicted cluster parameters (means, covariances, and
log weights).
"""
means, scales, log_weights = self.predict(params, inputs, input_lengths, ks)
covs = jnp.einsum("...ik,...jk->...ij", scales, scales)
log_ps = vmap(
vmap(
vmap(
jscipy.stats.multivariate_normal.logpdf,
in_axes=(0, None, None)),
in_axes=(None, 0, 0)))(inputs, means, covs)
log_ps = log_ps + log_weights[Ellipsis, jnp.newaxis]
log_ps = jnp.where(
util.make_mask(ks, self.max_k)[:, :, jnp.newaxis], log_ps,
jnp.full_like(log_ps, -jnp.inf))
clusters = jnp.argmax(log_ps, axis=-2)
return clusters, (means, covs, log_weights)
def classify_with_defaults(model, params, inputs, batch_size, input_lengths, ks,
max_k, default_cov):
cs, model_params = model.classify(params, inputs, input_lengths, ks)
if isinstance(model, MeanInferenceMachine):
mus = model_params
covs = jnp.tile(default_cov[jnp.newaxis, jnp.newaxis, :, :],
[batch_size, max_k, 1, 1])
log_weights = jnp.zeros([batch_size, max_k])
elif isinstance(model, MeanScaleInferenceMachine):
mus, covs = model_params
log_weights = jnp.zeros([batch_size, max_k])
elif isinstance(model, MeanScaleWeightInferenceMachine):
mus, covs, log_weights = model_params
return cs, (mus, covs, log_weights)
| 41.825991
| 80
| 0.679288
|
3bc0974332193dde6d10907a94eb1aba6b5b81cb
| 38,931
|
py
|
Python
|
superset/security/manager.py
|
hikaya-io/incubator-superset
|
3dac81c89613f04dc9e4424dda043821c7557323
|
[
"Apache-2.0"
] | 1
|
2020-12-07T10:45:34.000Z
|
2020-12-07T10:45:34.000Z
|
superset/security/manager.py
|
hikaya-io/incubator-superset
|
3dac81c89613f04dc9e4424dda043821c7557323
|
[
"Apache-2.0"
] | 26
|
2020-04-14T19:51:36.000Z
|
2022-03-31T02:38:06.000Z
|
superset/security/manager.py
|
hikaya-io/incubator-superset
|
3dac81c89613f04dc9e4424dda043821c7557323
|
[
"Apache-2.0"
] | 1
|
2020-07-12T21:02:18.000Z
|
2020-07-12T21:02:18.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-few-public-methods
"""A set of constants and methods to manage permissions and security"""
import logging
import re
from typing import Any, Callable, cast, List, Optional, Set, Tuple, TYPE_CHECKING, Union
from flask import current_app, g
from flask_appbuilder import Model
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.sqla.models import (
assoc_permissionview_role,
assoc_user_role,
PermissionView,
User,
)
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
RoleModelView,
UserModelView,
ViewMenuModelView,
)
from flask_appbuilder.widgets import ListWidget
from sqlalchemy import and_, or_
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm import Session
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.query import Query as SqlaQuery
from superset import sql_parse
from superset.connectors.connector_registry import ConnectorRegistry
from superset.constants import RouteMethod
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetSecurityException
from superset.utils.core import DatasourceName, RowLevelSecurityFilterType
if TYPE_CHECKING:
from superset.common.query_context import QueryContext
from superset.connectors.base.models import BaseDatasource
from superset.connectors.druid.models import DruidCluster
from superset.models.core import Database
from superset.models.sql_lab import Query
from superset.sql_parse import Table
from superset.viz import BaseViz
logger = logging.getLogger(__name__)
class SupersetSecurityListWidget(ListWidget):
"""
Redeclaring to avoid circular imports
"""
template = "superset/fab_overrides/list.html"
class SupersetRoleListWidget(ListWidget):
"""
Role model view from FAB already uses a custom list widget override
So we override the override
"""
template = "superset/fab_overrides/list_role.html"
def __init__(self, **kwargs: Any) -> None:
kwargs["appbuilder"] = current_app.appbuilder
super().__init__(**kwargs)
UserModelView.list_widget = SupersetSecurityListWidget
RoleModelView.list_widget = SupersetRoleListWidget
PermissionViewModelView.list_widget = SupersetSecurityListWidget
PermissionModelView.list_widget = SupersetSecurityListWidget
# Limiting routes on FAB model views
UserModelView.include_route_methods = RouteMethod.CRUD_SET | {
RouteMethod.ACTION,
RouteMethod.API_READ,
RouteMethod.ACTION_POST,
"userinfo",
}
RoleModelView.include_route_methods = RouteMethod.CRUD_SET
PermissionViewModelView.include_route_methods = {RouteMethod.LIST}
PermissionModelView.include_route_methods = {RouteMethod.LIST}
ViewMenuModelView.include_route_methods = {RouteMethod.LIST}
RoleModelView.list_columns = ["name"]
RoleModelView.edit_columns = ["name", "permissions", "user"]
RoleModelView.related_views = []
class SupersetSecurityManager( # pylint: disable=too-many-public-methods
SecurityManager
):
userstatschartview = None
READ_ONLY_MODEL_VIEWS = {"DatabaseAsync", "DatabaseView", "DruidClusterModelView"}
USER_MODEL_VIEWS = {
"UserDBModelView",
"UserLDAPModelView",
"UserOAuthModelView",
"UserOIDModelView",
"UserRemoteUserModelView",
}
GAMMA_READ_ONLY_MODEL_VIEWS = {
"SqlMetricInlineView",
"TableColumnInlineView",
"TableModelView",
"DruidColumnInlineView",
"DruidDatasourceModelView",
"DruidMetricInlineView",
"Datasource",
} | READ_ONLY_MODEL_VIEWS
ADMIN_ONLY_VIEW_MENUS = {
"AccessRequestsModelView",
"SQL Lab",
"Refresh Druid Metadata",
"ResetPasswordView",
"RoleModelView",
"LogModelView",
"Security",
"Row Level Security",
"Row Level Security Filters",
"RowLevelSecurityFiltersModelView",
} | USER_MODEL_VIEWS
ALPHA_ONLY_VIEW_MENUS = {
"Manage",
"CSS Templates",
"Queries",
"Import dashboards",
"Upload a CSV",
}
ADMIN_ONLY_PERMISSIONS = {
"can_sql_json", # TODO: move can_sql_json to sql_lab role
"can_override_role_permissions",
"can_sync_druid_source",
"can_override_role_permissions",
"can_approve",
"can_update_role",
"all_query_access",
}
READ_ONLY_PERMISSION = {"can_show", "can_list", "can_get", "can_external_metadata"}
ALPHA_ONLY_PERMISSIONS = {
"muldelete",
"all_database_access",
"all_datasource_access",
}
OBJECT_SPEC_PERMISSIONS = {
"database_access",
"schema_access",
"datasource_access",
"metric_access",
}
ACCESSIBLE_PERMS = {"can_userinfo"}
data_access_permissions = (
"database_access",
"schema_access",
"datasource_access",
"all_datasource_access",
"all_database_access",
"all_query_access",
)
def get_schema_perm( # pylint: disable=no-self-use
self, database: Union["Database", str], schema: Optional[str] = None
) -> Optional[str]:
"""
Return the database specific schema permission.
:param database: The Superset database or database name
:param schema: The Superset schema name
:return: The database specific schema permission
"""
if schema:
return f"[{database}].[{schema}]"
return None
def unpack_schema_perm( # pylint: disable=no-self-use
self, schema_permission: str
) -> Tuple[str, str]:
# [database_name].[schema_name]
schema_name = schema_permission.split(".")[1][1:-1]
database_name = schema_permission.split(".")[0][1:-1]
return database_name, schema_name
def can_access(self, permission_name: str, view_name: str) -> bool:
"""
Return True if the user can access the FAB permission/view, False otherwise.
Note this method adds protection from has_access failing from missing
permission/view entries.
:param permission_name: The FAB permission name
:param view_name: The FAB view-menu name
:returns: Whether the user can access the FAB permission/view
"""
user = g.user
if user.is_anonymous:
return self.is_item_public(permission_name, view_name)
return self._has_view_access(user, permission_name, view_name)
def can_access_all_queries(self) -> bool:
"""
Return True if the user can access all SQL Lab queries, False otherwise.
:returns: Whether the user can access all queries
"""
return self.can_access("all_query_access", "all_query_access")
def can_access_all_datasources(self) -> bool:
"""
Return True if the user can fully access all the Superset datasources, False
otherwise.
:returns: Whether the user can fully access all Superset datasources
"""
return self.can_access("all_datasource_access", "all_datasource_access")
def can_access_all_databases(self) -> bool:
"""
Return True if the user can fully access all the Superset databases, False
otherwise.
:returns: Whether the user can fully access all Superset databases
"""
return self.can_access("all_database_access", "all_database_access")
def can_access_database(self, database: Union["Database", "DruidCluster"]) -> bool:
"""
Return True if the user can fully access the Superset database, False otherwise.
Note for Druid the database is akin to the Druid cluster.
:param database: The Superset database
:returns: Whether the user can fully access the Superset database
"""
return (
self.can_access_all_datasources()
or self.can_access_all_databases()
or self.can_access("database_access", database.perm) # type: ignore
)
def can_access_schema(self, datasource: "BaseDatasource") -> bool:
"""
Return True if the user can fully access the schema associated with the Superset
datasource, False otherwise.
Note for Druid datasources the database and schema are akin to the Druid cluster
and datasource name prefix respectively, i.e., [schema.]datasource.
:param datasource: The Superset datasource
:returns: Whether the user can fully access the datasource's schema
"""
return (
self.can_access_all_datasources()
or self.can_access_database(datasource.database)
or self.can_access("schema_access", datasource.schema_perm or "")
)
def can_access_datasource(self, datasource: "BaseDatasource") -> bool:
"""
        Return True if the user can fully access the Superset datasource, False
otherwise.
:param datasource: The Superset datasource
:returns: Whether the user can fully access the Superset datasource
"""
try:
self.raise_for_access(datasource=datasource)
except SupersetSecurityException:
return False
return True
@staticmethod
def get_datasource_access_error_msg(datasource: "BaseDatasource") -> str:
"""
Return the error message for the denied Superset datasource.
:param datasource: The denied Superset datasource
:returns: The error message
"""
return f"""This endpoint requires the datasource {datasource.name}, database or
`all_datasource_access` permission"""
@staticmethod
def get_datasource_access_link( # pylint: disable=unused-argument
datasource: "BaseDatasource",
) -> Optional[str]:
"""
Return the link for the denied Superset datasource.
:param datasource: The denied Superset datasource
:returns: The access URL
"""
from superset import conf
return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def get_datasource_access_error_object( # pylint: disable=invalid-name
self, datasource: "BaseDatasource"
) -> SupersetError:
"""
Return the error object for the denied Superset datasource.
:param datasource: The denied Superset datasource
:returns: The error object
"""
return SupersetError(
error_type=SupersetErrorType.DATASOURCE_SECURITY_ACCESS_ERROR,
message=self.get_datasource_access_error_msg(datasource),
level=ErrorLevel.ERROR,
extra={
"link": self.get_datasource_access_link(datasource),
"datasource": datasource.name,
},
)
def get_table_access_error_msg( # pylint: disable=no-self-use
self, tables: Set["Table"]
) -> str:
"""
Return the error message for the denied SQL tables.
:param tables: The set of denied SQL tables
:returns: The error message
"""
quoted_tables = [f"`{table}`" for table in tables]
return f"""You need access to the following tables: {", ".join(quoted_tables)},
`all_database_access` or `all_datasource_access` permission"""
def get_table_access_error_object(self, tables: Set["Table"]) -> SupersetError:
"""
Return the error object for the denied SQL tables.
:param tables: The set of denied SQL tables
:returns: The error object
"""
return SupersetError(
error_type=SupersetErrorType.TABLE_SECURITY_ACCESS_ERROR,
message=self.get_table_access_error_msg(tables),
level=ErrorLevel.ERROR,
extra={
"link": self.get_table_access_link(tables),
"tables": [str(table) for table in tables],
},
)
def get_table_access_link( # pylint: disable=unused-argument,no-self-use
self, tables: Set["Table"]
) -> Optional[str]:
"""
Return the access link for the denied SQL tables.
:param tables: The set of denied SQL tables
:returns: The access URL
"""
from superset import conf
return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def can_access_table(self, database: "Database", table: "Table") -> bool:
"""
Return True if the user can access the SQL table, False otherwise.
:param database: The SQL database
:param table: The SQL table
:returns: Whether the user can access the SQL table
"""
try:
self.raise_for_access(database=database, table=table)
except SupersetSecurityException:
return False
return True
def user_view_menu_names(self, permission_name: str) -> Set[str]:
base_query = (
self.get_session.query(self.viewmenu_model.name)
.join(self.permissionview_model)
.join(self.permission_model)
.join(assoc_permissionview_role)
.join(self.role_model)
)
if not g.user.is_anonymous:
# filter by user id
view_menu_names = (
base_query.join(assoc_user_role)
.join(self.user_model)
.filter(self.user_model.id == g.user.id)
.filter(self.permission_model.name == permission_name)
).all()
return {s.name for s in view_menu_names}
# Properly treat anonymous user
public_role = self.get_public_role()
if public_role:
# filter by public role
view_menu_names = (
base_query.filter(self.role_model.id == public_role.id).filter(
self.permission_model.name == permission_name
)
).all()
return {s.name for s in view_menu_names}
return set()
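    # Illustrative note (editor's sketch, not part of the original module): for
    # permission_name="schema_access" the set returned above holds view-menu
    # names such as {"[examples].[public]", "[examples].[sales]"}, where the
    # database and schema names are hypothetical.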
def get_schemas_accessible_by_user(
self, database: "Database", schemas: List[str], hierarchical: bool = True
) -> List[str]:
"""
Return the list of SQL schemas accessible by the user.
:param database: The SQL database
:param schemas: The list of eligible SQL schemas
:param hierarchical: Whether to check using the hierarchical permission logic
:returns: The list of accessible SQL schemas
"""
from superset.connectors.sqla.models import SqlaTable
if hierarchical and self.can_access_database(database):
return schemas
# schema_access
accessible_schemas = {
self.unpack_schema_perm(s)[1]
for s in self.user_view_menu_names("schema_access")
if s.startswith(f"[{database}].")
}
# datasource_access
perms = self.user_view_menu_names("datasource_access")
if perms:
tables = (
self.get_session.query(SqlaTable.schema)
.filter(SqlaTable.database_id == database.id)
.filter(SqlaTable.schema.isnot(None))
.filter(SqlaTable.schema != "")
.filter(or_(SqlaTable.perm.in_(perms)))
.distinct()
)
accessible_schemas.update([table.schema for table in tables])
return [s for s in schemas if s in accessible_schemas]
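    # Illustrative sketch (hypothetical names, not from the original code):
    #   security_manager.get_schemas_accessible_by_user(db, ["public", "sales", "hr"])
    # returns only the schemas covered by the user's database, schema or
    # datasource permissions, e.g. ["public", "sales"].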
def get_datasources_accessible_by_user( # pylint: disable=invalid-name
self,
database: "Database",
datasource_names: List[DatasourceName],
schema: Optional[str] = None,
) -> List[DatasourceName]:
"""
Return the list of SQL tables accessible by the user.
:param database: The SQL database
:param datasource_names: The list of eligible SQL tables w/ schema
:param schema: The fallback SQL schema if not present in the table name
:returns: The list of accessible SQL tables w/ schema
"""
if self.can_access_database(database):
return datasource_names
if schema:
schema_perm = self.get_schema_perm(database, schema)
if schema_perm and self.can_access("schema_access", schema_perm):
return datasource_names
user_perms = self.user_view_menu_names("datasource_access")
schema_perms = self.user_view_menu_names("schema_access")
user_datasources = ConnectorRegistry.query_datasources_by_permissions(
self.get_session, database, user_perms, schema_perms
)
if schema:
names = {d.table_name for d in user_datasources if d.schema == schema}
return [d for d in datasource_names if d in names]
full_names = {d.full_name for d in user_datasources}
return [d for d in datasource_names if f"[{database}].[{d}]" in full_names]
def merge_perm(self, permission_name: str, view_menu_name: str) -> None:
"""
Add the FAB permission/view-menu.
:param permission_name: The FAB permission name
        :param view_menu_name: The FAB view-menu name
:see: SecurityManager.add_permission_view_menu
"""
logger.warning(
"This method 'merge_perm' is deprecated use add_permission_view_menu"
)
self.add_permission_view_menu(permission_name, view_menu_name)
def _is_user_defined_permission(self, perm: Model) -> bool:
"""
Return True if the FAB permission is user defined, False otherwise.
:param perm: The FAB permission
:returns: Whether the FAB permission is user defined
"""
return perm.permission.name in self.OBJECT_SPEC_PERMISSIONS
def create_custom_permissions(self) -> None:
"""
Create custom FAB permissions.
"""
self.add_permission_view_menu("all_datasource_access", "all_datasource_access")
self.add_permission_view_menu("all_database_access", "all_database_access")
self.add_permission_view_menu("all_query_access", "all_query_access")
def create_missing_perms(self) -> None:
"""
Creates missing FAB permissions for datasources, schemas and metrics.
"""
from superset.models import core as models
logger.info("Fetching a set of all perms to lookup which ones are missing")
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
        def merge_pv(perm_name: str, view_menu_name: str) -> None:
            """Create the permission/view-menu pair only if it doesn't already exist"""
            if perm_name and view_menu_name and (perm_name, view_menu_name) not in all_pvs:
                self.add_permission_view_menu(perm_name, view_menu_name)
logger.info("Creating missing datasource permissions.")
datasources = ConnectorRegistry.get_all_datasources(self.get_session)
for datasource in datasources:
merge_pv("datasource_access", datasource.get_perm())
merge_pv("schema_access", datasource.get_schema_perm())
logger.info("Creating missing database permissions.")
databases = self.get_session.query(models.Database).all()
for database in databases:
merge_pv("database_access", database.perm)
def clean_perms(self) -> None:
"""
Clean up the FAB faulty permissions.
"""
logger.info("Cleaning faulty perms")
sesh = self.get_session
pvms = sesh.query(PermissionView).filter(
or_(
PermissionView.permission # pylint: disable=singleton-comparison
== None,
PermissionView.view_menu # pylint: disable=singleton-comparison
== None,
)
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
logger.info("Deleted %i faulty permissions", deleted_count)
def sync_role_definitions(self) -> None:
"""
Initialize the Superset application with security roles and such.
"""
from superset import conf
logger.info("Syncing role definition")
self.create_custom_permissions()
# Creating default roles
self.set_role("Admin", self._is_admin_pvm)
self.set_role("Alpha", self._is_alpha_pvm)
self.set_role("Gamma", self._is_gamma_pvm)
self.set_role("granter", self._is_granter_pvm)
self.set_role("sql_lab", self._is_sql_lab_pvm)
# Configure public role
if conf["PUBLIC_ROLE_LIKE"]:
self.copy_role(conf["PUBLIC_ROLE_LIKE"], self.auth_role_public, merge=True)
if conf.get("PUBLIC_ROLE_LIKE_GAMMA", False):
logger.warning(
"The config `PUBLIC_ROLE_LIKE_GAMMA` is deprecated and will be removed "
"in Superset 1.0. Please use `PUBLIC_ROLE_LIKE` instead."
)
self.copy_role("Gamma", self.auth_role_public, merge=True)
self.create_missing_perms()
# commit role and view menu updates
self.get_session.commit()
self.clean_perms()
def _get_pvms_from_builtin_role(self, role_name: str) -> List[PermissionView]:
"""
        Gets a list of model PermissionView permissions inferred from a builtin role
definition
"""
role_from_permissions_names = self.builtin_roles.get(role_name, [])
all_pvms = self.get_session.query(PermissionView).all()
role_from_permissions = []
for pvm_regex in role_from_permissions_names:
view_name_regex = pvm_regex[0]
permission_name_regex = pvm_regex[1]
for pvm in all_pvms:
if re.match(view_name_regex, pvm.view_menu.name) and re.match(
permission_name_regex, pvm.permission.name
):
if pvm not in role_from_permissions:
role_from_permissions.append(pvm)
return role_from_permissions
def copy_role(
self, role_from_name: str, role_to_name: str, merge: bool = True
) -> None:
"""
        Copies permissions from one role to another.
Note: Supports regex defined builtin roles
:param role_from_name: The FAB role name from where the permissions are taken
        :param role_to_name: The FAB role name to which the permissions are copied
:param merge: If merge is true, keep data access permissions
if they already exist on the target role
"""
logger.info("Copy/Merge %s to %s", role_from_name, role_to_name)
# If it's a builtin role extract permissions from it
if role_from_name in self.builtin_roles:
role_from_permissions = self._get_pvms_from_builtin_role(role_from_name)
else:
role_from_permissions = list(self.find_role(role_from_name).permissions)
role_to = self.add_role(role_to_name)
# If merge, recover existing data access permissions
if merge:
for permission_view in role_to.permissions:
if (
permission_view not in role_from_permissions
and permission_view.permission.name in self.data_access_permissions
):
role_from_permissions.append(permission_view)
role_to.permissions = role_from_permissions
self.get_session.merge(role_to)
self.get_session.commit()
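    # Illustrative sketch (hypothetical role names): to copy the built-in Gamma
    # role onto "Public" while keeping any data access grants already present:
    #   security_manager.copy_role("Gamma", "Public", merge=True)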
def set_role(
self, role_name: str, pvm_check: Callable[[PermissionView], bool]
) -> None:
"""
Set the FAB permission/views for the role.
:param role_name: The FAB role name
:param pvm_check: The FAB permission/view check
"""
logger.info("Syncing %s perms", role_name)
pvms = self.get_session.query(PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.add_role(role_name)
role_pvms = [
permission_view for permission_view in pvms if pvm_check(permission_view)
]
role.permissions = role_pvms
self.get_session.merge(role)
self.get_session.commit()
def _is_admin_only(self, pvm: Model) -> bool:
"""
Return True if the FAB permission/view is accessible to only Admin users,
False otherwise.
Note readonly operations on read only model views are allowed only for admins.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to only Admin users
"""
if (
pvm.view_menu.name in self.READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ADMIN_ONLY_VIEW_MENUS
or pvm.permission.name in self.ADMIN_ONLY_PERMISSIONS
)
def _is_alpha_only(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is accessible to only Alpha users,
False otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to only Alpha users
"""
if (
pvm.view_menu.name in self.GAMMA_READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ALPHA_ONLY_VIEW_MENUS
or pvm.permission.name in self.ALPHA_ONLY_PERMISSIONS
)
def _is_accessible_to_all(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is accessible to all, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to all users
"""
return pvm.permission.name in self.ACCESSIBLE_PERMS
def _is_admin_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Admin user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Admin related
"""
return not self._is_user_defined_permission(pvm)
def _is_alpha_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Alpha user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Alpha related
"""
return not (
self._is_user_defined_permission(pvm) or self._is_admin_only(pvm)
) or self._is_accessible_to_all(pvm)
def _is_gamma_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Gamma user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Gamma related
"""
return not (
self._is_user_defined_permission(pvm)
or self._is_admin_only(pvm)
or self._is_alpha_only(pvm)
) or self._is_accessible_to_all(pvm)
def _is_sql_lab_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is SQL Lab related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is SQL Lab related
"""
return (
pvm.view_menu.name
in {"SQL Lab", "SQL Editor", "Query Search", "Saved Queries"}
or pvm.permission.name
in {
"can_sql_json",
"can_csv",
"can_search_queries",
"can_sqllab_viz",
"can_sqllab_table_viz",
"can_sqllab",
}
or (
pvm.view_menu.name in self.USER_MODEL_VIEWS
and pvm.permission.name == "can_list"
)
)
def _is_granter_pvm( # pylint: disable=no-self-use
self, pvm: PermissionModelView
) -> bool:
"""
Return True if the user can grant the FAB permission/view, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the user can grant the FAB permission/view
"""
return pvm.permission.name in {"can_override_role_permissions", "can_approve"}
def set_perm( # pylint: disable=no-self-use,unused-argument
self, mapper: Mapper, connection: Connection, target: "BaseDatasource"
) -> None:
"""
Set the datasource permissions.
:param mapper: The table mapper
:param connection: The DB-API connection
:param target: The mapped instance being persisted
"""
link_table = target.__table__ # pylint: disable=no-member
if target.perm != target.get_perm():
connection.execute(
link_table.update()
.where(link_table.c.id == target.id)
.values(perm=target.get_perm())
)
if (
hasattr(target, "schema_perm")
and target.schema_perm != target.get_schema_perm()
):
connection.execute(
link_table.update()
.where(link_table.c.id == target.id)
.values(schema_perm=target.get_schema_perm())
)
pvm_names = []
if target.__tablename__ in {"dbs", "clusters"}:
pvm_names.append(("database_access", target.get_perm()))
else:
pvm_names.append(("datasource_access", target.get_perm()))
if target.schema:
pvm_names.append(("schema_access", target.get_schema_perm()))
# TODO(bogdan): modify slice permissions as well.
for permission_name, view_menu_name in pvm_names:
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
if not permission:
permission_table = (
self.permission_model.__table__ # pylint: disable=no-member
)
connection.execute(
permission_table.insert().values(name=permission_name)
)
permission = self.find_permission(permission_name)
if not view_menu:
view_menu_table = (
self.viewmenu_model.__table__ # pylint: disable=no-member
)
connection.execute(view_menu_table.insert().values(name=view_menu_name))
view_menu = self.find_view_menu(view_menu_name)
if permission and view_menu:
pv = (
self.get_session.query(self.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
if not pv and permission and view_menu:
permission_view_table = (
self.permissionview_model.__table__ # pylint: disable=no-member
)
connection.execute(
permission_view_table.insert().values(
permission_id=permission.id, view_menu_id=view_menu.id
)
)
def raise_for_access( # pylint: disable=too-many-arguments,too-many-branches
self,
database: Optional["Database"] = None,
datasource: Optional["BaseDatasource"] = None,
query: Optional["Query"] = None,
query_context: Optional["QueryContext"] = None,
table: Optional["Table"] = None,
viz: Optional["BaseViz"] = None,
) -> None:
"""
Raise an exception if the user cannot access the resource.
:param database: The Superset database
:param datasource: The Superset datasource
:param query: The SQL Lab query
:param query_context: The query context
:param table: The Superset table (requires database)
:param viz: The visualization
:raises SupersetSecurityException: If the user cannot access the resource
"""
from superset.connectors.sqla.models import SqlaTable
from superset.sql_parse import Table
if database and table or query:
if query:
database = query.database
database = cast("Database", database)
if self.can_access_database(database):
return
if query:
tables = {
Table(table_.table, table_.schema or query.schema)
for table_ in sql_parse.ParsedQuery(query.sql).tables
}
elif table:
tables = {table}
denied = set()
for table_ in tables:
schema_perm = self.get_schema_perm(database, schema=table_.schema)
if not (schema_perm and self.can_access("schema_access", schema_perm)):
datasources = SqlaTable.query_datasources_by_name(
self.get_session, database, table_.table, schema=table_.schema
)
                    # Access to any one of the matching datasources is sufficient.
for datasource_ in datasources:
if self.can_access("datasource_access", datasource_.perm):
break
else:
denied.add(table_)
if denied:
raise SupersetSecurityException(
self.get_table_access_error_object(denied)
)
if datasource or query_context or viz:
if query_context:
datasource = query_context.datasource
elif viz:
datasource = viz.datasource
assert datasource
if not (
self.can_access_schema(datasource)
or self.can_access("datasource_access", datasource.perm or "")
):
raise SupersetSecurityException(
self.get_datasource_access_error_object(datasource)
)
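    # Illustrative calling patterns (editor's sketch; the objects below are
    # assumed to exist and are not defined here):
    #   security_manager.raise_for_access(database=db, table=Table("users", "public"))
    #   security_manager.raise_for_access(query=sql_lab_query)
    #   security_manager.raise_for_access(datasource=datasource)
    # Each call returns None on success or raises SupersetSecurityException.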
def get_user_by_username(
self, username: str, session: Session = None
) -> Optional[User]:
"""
        Retrieve a user by username (case sensitive). The optional session
        parameter is a utility normally useful for Celery tasks, where the
        session needs to be scoped.
"""
session = session or self.get_session
return (
session.query(self.user_model)
.filter(self.user_model.username == username)
.one_or_none()
)
def get_rls_filters(self, table: "BaseDatasource") -> List[SqlaQuery]:
"""
Retrieves the appropriate row level security filters for the current user and
the passed table.
:param table: The table to check against
:returns: A list of filters
"""
if hasattr(g, "user") and hasattr(g.user, "id"):
from superset.connectors.sqla.models import (
RLSFilterRoles,
RLSFilterTables,
RowLevelSecurityFilter,
)
user_roles = (
self.get_session.query(assoc_user_role.c.role_id)
.filter(assoc_user_role.c.user_id == g.user.id)
.subquery()
)
regular_filter_roles = (
self.get_session.query(RLSFilterRoles.c.rls_filter_id)
.join(RowLevelSecurityFilter)
.filter(
RowLevelSecurityFilter.filter_type
== RowLevelSecurityFilterType.REGULAR
)
.filter(RLSFilterRoles.c.role_id.in_(user_roles))
.subquery()
)
base_filter_roles = (
self.get_session.query(RLSFilterRoles.c.rls_filter_id)
.join(RowLevelSecurityFilter)
.filter(
RowLevelSecurityFilter.filter_type
== RowLevelSecurityFilterType.BASE
)
.filter(RLSFilterRoles.c.role_id.in_(user_roles))
.subquery()
)
filter_tables = (
self.get_session.query(RLSFilterTables.c.rls_filter_id)
.filter(RLSFilterTables.c.table_id == table.id)
.subquery()
)
query = (
self.get_session.query(
RowLevelSecurityFilter.id,
RowLevelSecurityFilter.group_key,
RowLevelSecurityFilter.clause,
)
.filter(RowLevelSecurityFilter.id.in_(filter_tables))
.filter(
or_(
and_(
RowLevelSecurityFilter.filter_type
== RowLevelSecurityFilterType.REGULAR,
RowLevelSecurityFilter.id.in_(regular_filter_roles),
),
and_(
RowLevelSecurityFilter.filter_type
== RowLevelSecurityFilterType.BASE,
RowLevelSecurityFilter.id.notin_(base_filter_roles),
),
)
)
)
return query.all()
return []
def get_rls_ids(self, table: "BaseDatasource") -> List[int]:
"""
Retrieves the appropriate row level security filters IDs for the current user
and the passed table.
:param table: The table to check against
:returns: A list of IDs
"""
ids = [f.id for f in self.get_rls_filters(table)]
ids.sort() # Combinations rather than permutations
return ids
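    # Illustrative note (editor's sketch): because the IDs are sorted, the list
    # returned above is stable for a given user/table pair, e.g. [3, 7], which
    # makes it suitable as a cache-key component (an assumption about usage,
    # not something this method does itself).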
| 35.815087
| 88
| 0.62115
|
210d95863df90ae0f76133e2c5e03e4246bcc914
| 291
|
py
|
Python
|
postoptimizer/subreddits/apps.py
|
mjkaufer/PostOptimizer
|
7f0c12d5832c10e5dce6c059bbd958b8737533e4
|
[
"MIT"
] | null | null | null |
postoptimizer/subreddits/apps.py
|
mjkaufer/PostOptimizer
|
7f0c12d5832c10e5dce6c059bbd958b8737533e4
|
[
"MIT"
] | null | null | null |
postoptimizer/subreddits/apps.py
|
mjkaufer/PostOptimizer
|
7f0c12d5832c10e5dce6c059bbd958b8737533e4
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.apps import AppConfig
class SubredditsConfig(AppConfig):
name = 'postoptimizer.subreddits'
verbose_name = 'subreddit stats stuff'
def ready(self):
# from .models import SubredditStats
pass
| 19.4
| 44
| 0.683849
|
a6ab15c2903f3b2f130c56a38afb23e92c3c2863
| 12,984
|
py
|
Python
|
threedi_custom_stats/presets.py
|
threedi/beta-plugins
|
530a5542deda73201626f7a429f87ce64cbac51a
|
[
"MIT"
] | 1
|
2022-02-14T10:31:51.000Z
|
2022-02-14T10:31:51.000Z
|
threedi_custom_stats/presets.py
|
threedi/beta-plugins
|
530a5542deda73201626f7a429f87ce64cbac51a
|
[
"MIT"
] | 11
|
2019-04-08T14:11:45.000Z
|
2021-07-02T14:28:04.000Z
|
threedi_custom_stats/presets.py
|
threedi/beta-plugins
|
530a5542deda73201626f7a429f87ce64cbac51a
|
[
"MIT"
] | null | null | null |
from typing import List
try:
from .threedi_result_aggregation import *
# from .aggregation_classes import *
# from .constants import *
from .style import *
except ImportError:
from threedi_result_aggregation import *
# from constants import *
from style import *
class Preset:
def __init__(self,
name: str,
description: str = '',
aggregations=None,
resample_point_layer: bool = False,
flowlines_style: Style = None,
cells_style: Style = None,
nodes_style: Style = None,
flowlines_style_param_values: dict = None,
cells_style_param_values: dict = None,
nodes_style_param_values: dict = None
):
if aggregations is None:
aggregations = list()
self.name = name
self.description = description
self.__aggregations = aggregations
self.resample_point_layer = resample_point_layer
self.flowlines_style = flowlines_style
self.cells_style = cells_style
self.nodes_style = nodes_style
self.flowlines_style_param_values = flowlines_style_param_values
self.cells_style_param_values = cells_style_param_values
self.nodes_style_param_values = nodes_style_param_values
def add_aggregation(self, aggregation: Aggregation):
self.__aggregations.append(aggregation)
def aggregations(self):
return self.__aggregations
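# Illustrative sketch of building a custom preset (the variable short name 'u1'
# and the preset itself are assumptions for illustration, not definitions from
# this module):
#
#   my_preset = Preset(
#       name='Maximum velocity',
#       description='Maximum flow velocity per flowline.',
#       aggregations=[Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('u1'),
#                                 method=AGGREGATION_METHODS.get_by_short_name('max'))],
#   )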
# No preset selected
NO_PRESET = Preset(name='(no preset selected)',
aggregations=[]
)
# Maximum water level
max_wl_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('max'),
)
]
MAX_WL_PRESETS = Preset(name='Maximum water level',
description='Calculates the maximum water level for nodes and cells within the chosen '
'time filter.',
aggregations=max_wl_aggregations,
nodes_style=STYLE_SINGLE_COLUMN_GRADUATED_NODE,
cells_style=STYLE_SINGLE_COLUMN_GRADUATED_CELL,
nodes_style_param_values={'column': 's1_max'},
cells_style_param_values={'column': 's1_max'}
)
# Change in water level
change_wl_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('first'),
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('last'),
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('min'),
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('max'),
)
]
CHANGE_WL_PRESETS = Preset(name='Change in water level',
description='Calculates the difference in water level (last - first). In the styling '
                                       'NULL values (when the cell is dry) are replaced by the cell\'s lowest '
'pixel elevation (z_coordinate).',
aggregations=change_wl_aggregations,
cells_style=STYLE_CHANGE_WL,
cells_style_param_values={'first': 's1_first', 'last': 's1_last'}
)
# Flow pattern
flow_pattern_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('q_out_x'),
method=AGGREGATION_METHODS.get_by_short_name('sum'),
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('q_out_y'),
method=AGGREGATION_METHODS.get_by_short_name('sum'),
)]
FLOW_PATTERN_PRESETS = Preset(name='Flow pattern',
description='Generates a flow pattern map. The aggregation calculates total outflow per '
'node in x and y directions, resampled to grid_space. In the styling that is '
'applied, the shade of blue and the rotation of the arrows are based on the '
'resultant of these two.\n\n'
                                          'To save the output to disk, save to GeoPackage (Export > Save features as), '
'copy the styling to the new layer (Styles > Copy Style / Paste Style). Then '
'save the styling as default in the GeoPackage (Properties > Style > Save as '
'Default > Save default style to Datasource Database). ',
aggregations=flow_pattern_aggregations,
resample_point_layer=True,
nodes_style=STYLE_VECTOR,
nodes_style_param_values={'x': 'q_out_x_sum',
'y': 'q_out_y_sum'}
)
# Timestep reduction analysis
ts_reduction_analysis_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('ts_max'),
method=AGGREGATION_METHODS.get_by_short_name('below_thres'),
threshold=1.0
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('ts_max'),
method=AGGREGATION_METHODS.get_by_short_name('below_thres'),
threshold=3.0
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('ts_max'),
method=AGGREGATION_METHODS.get_by_short_name('below_thres'),
threshold=5.0
)]
TS_REDUCTION_ANALYSIS_PRESETS = Preset(name='Timestep reduction analysis',
description='Timestep reduction analysis calculates the % of time that the flow '
'through each flowline limits the calculation timestep to below 1, '
'3, '
'or 5 seconds. \n\n'
'The styling highlights the flowlines that have a timestep of \n'
' < 1 s for 10% of the time and/or\n'
' < 3 s for 50% of the time and/or\n'
' < 5 s for 80% of the time;'
'\n\n'
'Replacing these flowlines with orifices may speed up the '
'simulation '
'without large impact on the results. Import the highlighted lines '
'from the aggregation result into your 3Di spatialite as '
'\'ts_reducers\' and use this query to replace line elements ('
'example '
'for v2_pipe):\n\n'
'-- Add orifice:\n'
'INSERT INTO v2_orifice(display_name, code, crest_level, sewerage, '
'cross_section_definition_id, friction_value, friction_type, '
'discharge_coefficient_positive, discharge_coefficient_negative, '
'zoom_category, crest_type, connection_node_start_id, '
'connection_node_end_id)\n'
'SELECT display_name, code, max(invert_level_start_point, '
'invert_level_end_point) AS crest_level, TRUE AS sewerage, '
'cross_section_definition_id, friction_value, friction_type, '
'1 AS discharge_coefficient_positive, '
'1 AS discharge_coefficient_negative, zoom_category, '
'4 AS crest_type, '
'connection_node_start_id, connection_node_end_id\n'
'FROM v2_pipe\n'
'WHERE id IN (SELECT spatialite_id FROM ts_reducers WHERE '
'content_type=\'v2_pipe\');\n\n'
'-- Remove pipe\n'
'DELETE FROM v2_pipe WHERE id IN (SELECT spatialite_id FROM '
'ts_reducers WHERE content_type=\'v2_pipe\');',
aggregations=ts_reduction_analysis_aggregations,
flowlines_style=STYLE_TIMESTEP_REDUCTION_ANALYSIS,
flowlines_style_param_values={'col1': 'ts_max_below_thres_1_0',
'col2': 'ts_max_below_thres_3_0',
'col3': 'ts_max_below_thres_5_0'
}
)
# Source or sink (mm)
source_sink_mm_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('rain_depth'),
method=AGGREGATION_METHODS.get_by_short_name('sum')
),
Aggregation(
variable=AGGREGATION_VARIABLES.get_by_short_name('infiltration_rate_simple_mm'),
method=AGGREGATION_METHODS.get_by_short_name('sum')
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('intercepted_volume_mm'),
method=AGGREGATION_METHODS.get_by_short_name('last')
)
]
SOURCE_SINK_MM_PRESETS = Preset(name='Source or sink (mm)',
description='Calculate by how many mm a node or cell is a net source or sink.'
                                            ' A positive result indicates a source, a negative result a sink.',
aggregations=source_sink_mm_aggregations,
cells_style=STYLE_BALANCE,
cells_style_param_values={'positive_col1': 'rain_depth_sum',
'positive_col2': '',
'positive_col3': '',
'negative_col1': 'infiltration_rate_simple_mm_sum',
'negative_col2': 'intercepted_volume_mm_last',
'negative_col3': '',
}
)
PRESETS = [NO_PRESET, MAX_WL_PRESETS, CHANGE_WL_PRESETS, SOURCE_SINK_MM_PRESETS, FLOW_PATTERN_PRESETS,
TS_REDUCTION_ANALYSIS_PRESETS]
| 64.277228
| 120
| 0.456716
|
1da3e643982e4946e8cf38ed087746bba26268d4
| 5,369
|
py
|
Python
|
docs/conf.py
|
ynop/evalmate
|
0274eb79528cee42405778c539ae8f576a48efb4
|
[
"MIT"
] | 2
|
2019-08-16T14:49:20.000Z
|
2020-11-15T18:33:33.000Z
|
docs/conf.py
|
ynop/evalmate
|
0274eb79528cee42405778c539ae8f576a48efb4
|
[
"MIT"
] | 3
|
2018-12-06T14:33:30.000Z
|
2018-12-19T13:54:12.000Z
|
docs/conf.py
|
ynop/evalmate
|
0274eb79528cee42405778c539ae8f576a48efb4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# evalmate documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 21 16:54:44 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import evalmate
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
napoleon_use_ivar = True
napoleon_use_admonition_for_notes = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'evalmate'
copyright = '2017, evalmate'
author = 'Matthias Büchi, Andreas Ahlenstorf'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
release = '0.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'evalmatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'evalmate.tex', 'evalmate Documentation',
'buec', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'evalmate', 'evalmate Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'evalmate', 'evalmate Documentation',
author, 'evalmate', 'One line description of project.',
'Miscellaneous'),
]
| 30.856322
| 79
| 0.681505
|
98d6ad51c63441bd76d0ec91e7144d529b337d34
| 4,664
|
py
|
Python
|
ros/src/tl_detector/light_classification/trafficLightClassifierClass_reduced.py
|
mriosrivas/CarND-Capstone
|
798235066bdecc91f9b00e663a6bb6f697f6f302
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/trafficLightClassifierClass_reduced.py
|
mriosrivas/CarND-Capstone
|
798235066bdecc91f9b00e663a6bb6f697f6f302
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/trafficLightClassifierClass_reduced.py
|
mriosrivas/CarND-Capstone
|
798235066bdecc91f9b00e663a6bb6f697f6f302
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import sys
import tensorflow as tf
import time
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
#Loading label map
#label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
#categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
#category_index = label_map_util.create_category_index(categories)
class TrafficLightsClassifier(object):
def __init__(self):
#Object Detection Imports
PATH_TO_OBJECT_DETECTION = '/home/student/Desktop/CarND-Capstone/ros/src/tl_detector/light_classification/tensorflow/models/research/'
sys.path.insert(0, PATH_TO_OBJECT_DETECTION)
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
#Model Preparation
ssd_inception_sim_model = '/home/student/Desktop/CarND-Capstone/ros/src/tl_detector/light_classification/tensorflow/models/research/frozen_models/frozen_sim_inception/frozen_inference_graph.pb'
#ssd_inception_real_model = 'frozen_models/frozen_real_inception_6561/frozen_inference_graph.pb'
PATH_TO_LABELS = '/home/student/Desktop/CarND-Capstone/ros/src/tl_detector/light_classification/tensorflow/models/research/label_map.pbtxt'
NUM_CLASSES = 14
self.label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
self.categories = label_map_util.convert_label_map_to_categories(self.label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
self.category_index = label_map_util.create_category_index(self.categories)
print("Success ")
#def setModel(self, model):
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
self.od_graph_def = tf.GraphDef()
with tf.gfile.GFile(ssd_inception_sim_model, 'rb') as fid:
serialized_graph = fid.read()
self.od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(self.od_graph_def, name='')
print("Success 2")
#def startSession(self):
        # Keep a persistent session for later calls to clasifyImage(); using a
        # "with tf.Session(...)" block here would close the session as soon as
        # __init__ returns, making self.sess unusable afterwards.
        self.sess = tf.Session(graph=self.detection_graph)
        # Definite input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        print("Success 3")
def load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
print("Success 4")
def clasifyImage(self, img):
image = Image.open(img)
print(image.size)
#image = cv2.imread(img)
#print("Image lenght is: ", len(image))
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = self.load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
time0 = time.time()
print("Before sess.run \n")
# Actual detection.
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: image_np_expanded})
time1 = time.time()
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
return scores, classes
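    # Illustrative sketch (editor's addition; the 0.5 threshold is an assumption):
    #   scores, classes = classifier.clasifyImage(img)
    #   state = traffic_light_dict[classes[0]] if scores[0] > 0.5 else 4  # 4 = UNKNOWN
    # maps the top detection to the traffic light state codes used in __main__ below.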
if __name__ == "__main__":
classifier = TrafficLightsClassifier()
#classifier.setModel(ssd_inception_sim_model)
#classifier.startSession()
img = '/home/student/Desktop/Traffic_Lights_Classifier/TrafficLight_Detection-TensorFlowAPI/test_images_sim/left0040.jpg'
traffic_light_dict = {1 : 2, 2: 0, 3: 1, 4: 4 }
scores, classes = classifier.clasifyImage(img)
for i in range(3):
print(scores[i], traffic_light_dict[classes[i]])
#Debugging purposes
#traffic_light_decoder = {2: 'GREEN', 0: 'RED', 1: 'YELLOW', 4: 'UNKNOWN'}
#for i in range(3):
# print(scores[i], traffic_light_decoder[traffic_light_dict[classes[i]]])
| 35.333333
| 195
| 0.777444
|
ce7f739158aae746fb1a8407f4cde1d7ded6d45c
| 13,132
|
py
|
Python
|
test/test_ferrybox_bad_data_times_to_status.py
|
Swiss-Polar-Institute/science-data-utils
|
6a85570ee586fa1ba1644ba2b1c9dea3a5257eae
|
[
"MIT"
] | null | null | null |
test/test_ferrybox_bad_data_times_to_status.py
|
Swiss-Polar-Institute/science-data-utils
|
6a85570ee586fa1ba1644ba2b1c9dea3a5257eae
|
[
"MIT"
] | null | null | null |
test/test_ferrybox_bad_data_times_to_status.py
|
Swiss-Polar-Institute/science-data-utils
|
6a85570ee586fa1ba1644ba2b1c9dea3a5257eae
|
[
"MIT"
] | null | null | null |
import unittest
import ferrybox_bad_data_times_to_status
import datetime
def text_to_dt(t):
"""Use the function to convert date time strings to python datetime format in the input and expected output data
in the tests below. """
return datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
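# Illustrative example (editor's note):
#   text_to_dt('2016-12-24 11:30:00') == datetime.datetime(2016, 12, 24, 11, 30)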
class TestFerryboxBadDataTimesToStatus(unittest.TestCase):
maxDiff = None # allows full output of any failed tests to be printed
def test_change_format_from_input_to_datetime(self):
"""Test simple conversion from input off periods in format date, time, time to python datetime"""
pump_log_input = [['2016-12-24', '11:30:00', '12:30:00'],
['2016-12-25', '23:45:00', '23:50:00'],
]
expected = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:50:00')],
]
actual = ferrybox_bad_data_times_to_status.change_format_from_input_to_datetime(pump_log_input)
self.assertListEqual(actual, expected)
def test_collapse_same_day_simple(self):
"""Test code to convert mulitple lines that run where the time periods could be combined (such as over
multiple days) because there are no gaps.
First test of simple rows where this case is not included. """
off_input = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-26 01:00:00'), text_to_dt('2016-12-26 14:30:00')],
[text_to_dt('2016-12-27 13:00:00'), text_to_dt('2016-12-27 15:45:00')],
]
expected = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-26 01:00:00'), text_to_dt('2016-12-26 14:30:00')],
[text_to_dt('2016-12-27 13:00:00'), text_to_dt('2016-12-27 15:45:00')],
]
actual = ferrybox_bad_data_times_to_status.collapse_same_day_off(off_input)
self.assertListEqual(actual, expected)
def test_collapse_same_day_off(self):
"""Test code to convert mulitple lines that run where the time periods could be combined (such as over
multiple days) because there are no gaps.
Test where there are only two rows that should be combined into one. """
off_input = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 20:45:00'), text_to_dt('2016-12-25 23:59:59')],
[text_to_dt('2016-12-26 00:00:00'), text_to_dt('2016-12-26 14:30:00')],
[text_to_dt('2016-12-27 13:00:00'), text_to_dt('2016-12-27 15:45:00')],
]
expected = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 20:45:00'), text_to_dt('2016-12-26 14:30:00')],
[text_to_dt('2016-12-27 13:00:00'), text_to_dt('2016-12-27 15:45:00')],
]
actual = ferrybox_bad_data_times_to_status.collapse_same_day_off(off_input)
self.assertListEqual(actual, expected)
def test_collapse_same_day_off_multi(self):
"""Test code to convert mulitple lines that run where the time periods could be combined (such as over
multiple days) because there are no gaps.
Test where there are multiple rows that should be combined into one row. """
off_input = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 20:45:00'), text_to_dt('2016-12-25 23:59:59')],
[text_to_dt('2016-12-26 00:00:00'), text_to_dt('2016-12-26 23:59:59')],
[text_to_dt('2016-12-27 00:00:00'), text_to_dt('2016-12-27 14:30:00')],
[text_to_dt('2016-12-28 13:00:00'), text_to_dt('2016-12-28 15:45:00')],
]
expected = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 20:45:00'), text_to_dt('2016-12-27 14:30:00')],
[text_to_dt('2016-12-28 13:00:00'), text_to_dt('2016-12-28 15:45:00')],
]
actual = ferrybox_bad_data_times_to_status.collapse_same_day_off(off_input)
self.assertListEqual(actual, expected)
def test_collapse_same_day_off_one_consecutive(self):
"""Test code to convert mulitple lines that run where the time periods could be combined (such as over multiple
days) because there are no gaps.
Test where there are two rows that should be combined into one row, but because they are consecutive rather
than run over midnight and consecutive. """
off_input = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 12:40:01'), text_to_dt('2016-12-25 12:50:10')],
[text_to_dt('2016-12-25 12:50:11'), text_to_dt('2016-12-25 13:30:00')],
[text_to_dt('2016-12-26 08:00:00'), text_to_dt('2016-12-26 15:00:00')]
]
expected = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 12:40:01'), text_to_dt('2016-12-25 13:30:00')],
[text_to_dt('2016-12-26 08:00:00'), text_to_dt('2016-12-26 15:00:00')]
]
actual = ferrybox_bad_data_times_to_status.collapse_same_day_off(off_input)
self.assertListEqual(actual, expected)
def test_collapse_same_day_off_multi_consecutive(self):
"""Test code to convert mulitple lines that run where the time periods could be combined (such as over multiple days) because there are no gaps.
Test where there are multiple rows that should be combined into one row, but because they are consecutive
rather than run over midnight and consecutive. """
off_input = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 12:40:01'), text_to_dt('2016-12-25 12:50:10')],
[text_to_dt('2016-12-25 12:50:11'), text_to_dt('2016-12-25 13:30:05')],
[text_to_dt('2016-12-25 13:30:06'), text_to_dt('2016-12-25 16:00:00')],
[text_to_dt('2016-12-26 08:00:00'), text_to_dt('2016-12-26 15:00:00')]
]
expected = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 12:40:01'), text_to_dt('2016-12-25 16:00:00')],
[text_to_dt('2016-12-26 08:00:00'), text_to_dt('2016-12-26 15:00:00')]
]
actual = ferrybox_bad_data_times_to_status.collapse_same_day_off(off_input)
self.assertListEqual(actual, expected)
def test_correct_off_seconds_same_minute(self):
"""Test code where the off periods have the same start and end time to the nearest minute. The end time of
        the off period in these cases should have 59 seconds added to it. Test covers the cases where there are no
rows that meet these criteria and also rows where the start and end is the same but to the nearest second."""
minute_input = [[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:45:00')],
[text_to_dt('2016-12-26 06:10:00'), text_to_dt('2016-12-26 06:10:00')],
[text_to_dt('2016-12-27 19:00:00'), text_to_dt('2016-12-27 20:00:00')],
[text_to_dt('2016-12-28 21:00:05'), text_to_dt('2016-12-28 21:00:05')]
]
expected = [[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:45:59')],
[text_to_dt('2016-12-26 06:10:00'), text_to_dt('2016-12-26 06:10:59')],
[text_to_dt('2016-12-27 19:00:00'), text_to_dt('2016-12-27 20:00:00')],
[text_to_dt('2016-12-28 21:00:05'), text_to_dt('2016-12-28 21:00:05')]
]
actual = ferrybox_bad_data_times_to_status.correct_off_seconds_same_minute(minute_input)
self.assertListEqual(actual, expected)
def test_process_to_on_off(self):
"""Test the code that converts lines of off periods to on and off rows.
First simple case. """
off_input = [[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:50:00')],
[text_to_dt('2016-12-26 06:10:00'), text_to_dt('2016-12-26 20:40:00')],
[text_to_dt('2016-12-27 19:00:00'), text_to_dt('2016-12-27 20:00:00')]
]
expected = [[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:50:00'), 'off'],
[text_to_dt('2016-12-25 23:50:00'), text_to_dt('2016-12-26 06:10:00'), 'on'],
[text_to_dt('2016-12-26 06:10:00'), text_to_dt('2016-12-26 20:40:00'), 'off'],
[text_to_dt('2016-12-26 20:40:00'), text_to_dt('2016-12-27 19:00:00'), 'on'],
[text_to_dt('2016-12-27 19:00:00'), text_to_dt('2016-12-27 20:00:00'), 'off'],
]
actual = ferrybox_bad_data_times_to_status.process_to_on_off(off_input)
self.assertListEqual(actual, expected)
def test_combine_multiday_rows_join(self):
"""Test the code that converts lines of off periods to on and off rows.
Case where there are rows to combine. """
off_input = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00')],
[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:50:00')],
[text_to_dt('2016-12-26 06:10:00'), text_to_dt('2016-12-27 19:30:00')],
[text_to_dt('2016-12-28 20:00:00'), text_to_dt('2016-12-28 21:00:00')],
]
expected = [[text_to_dt('2016-12-24 11:30:00'), text_to_dt('2016-12-24 12:30:00'), 'off'],
[text_to_dt('2016-12-24 12:30:00'), text_to_dt('2016-12-25 23:45:00'), 'on'],
[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:50:00'), 'off'],
[text_to_dt('2016-12-25 23:50:00'), text_to_dt('2016-12-26 06:10:00'), 'on'],
[text_to_dt('2016-12-26 06:10:00'), text_to_dt('2016-12-27 19:30:00'), 'off'],
[text_to_dt('2016-12-27 19:30:00'), text_to_dt('2016-12-28 20:00:00'), 'on'],
[text_to_dt('2016-12-28 20:00:00'), text_to_dt('2016-12-28 21:00:00'), 'off'],
]
actual = ferrybox_bad_data_times_to_status.process_to_on_off(off_input)
self.assertListEqual(actual, expected)
def test_process_seconds_and_to_on_off(self):
"""Tests the process of combining rows, converting the seconds of the end of an off row and processing to on and off rows."""
minute_input = [[text_to_dt('2016-12-25 11:00:00'), text_to_dt('2016-12-25 12:00:00')],
[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:45:00')],
[text_to_dt('2016-12-26 06:10:00'), text_to_dt('2016-12-26 06:10:00')],
[text_to_dt('2016-12-27 19:00:00'), text_to_dt('2016-12-27 23:59:59')],
[text_to_dt('2016-12-28 00:00:00'), text_to_dt('2016-12-28 21:00:05')],
[text_to_dt('2016-12-28 21:00:06'), text_to_dt('2016-12-28 22:00:00')],
[text_to_dt('2016-12-29 08:00:00'), text_to_dt('2016-12-29 12:00:00')]
]
expected = [[text_to_dt('2016-12-25 11:00:00'), text_to_dt('2016-12-25 12:00:00'), 'off'],
[text_to_dt('2016-12-25 12:00:00'), text_to_dt('2016-12-25 23:45:00'), 'on'],
[text_to_dt('2016-12-25 23:45:00'), text_to_dt('2016-12-25 23:45:59'), 'off'],
[text_to_dt('2016-12-25 23:45:59'), text_to_dt('2016-12-26 06:10:00'), 'on'], # the start time here needs correcting when the code changes
[text_to_dt('2016-12-26 06:10:00'), text_to_dt('2016-12-26 06:10:59'), 'off'],
[text_to_dt('2016-12-26 06:10:59'), text_to_dt('2016-12-27 19:00:00'), 'on'], # the start time here needs correcting when the code changes
[text_to_dt('2016-12-27 19:00:00'), text_to_dt('2016-12-28 22:00:00'), 'off'],
[text_to_dt('2016-12-28 22:00:00'), text_to_dt('2016-12-29 08:00:00'), 'on'],
[text_to_dt('2016-12-29 08:00:00'), text_to_dt('2016-12-29 12:00:00'), 'off']
]
collapsed_list = ferrybox_bad_data_times_to_status.collapse_same_day_off(minute_input)
correct_seconds = ferrybox_bad_data_times_to_status.correct_off_seconds_same_minute(collapsed_list)
actual = ferrybox_bad_data_times_to_status.process_to_on_off(correct_seconds)
self.assertListEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
| 53.6
| 162
| 0.594274
|
515d7ec9e5043b431c7bd4701008f3958c9d8b74
| 1,214
|
py
|
Python
|
PythonTest/ShowMeTheCode/find_links.py
|
qianhk/FeiPython
|
c87578d3c04b7345a99fef7390c8ea12c6f2c716
|
[
"Apache-2.0"
] | null | null | null |
PythonTest/ShowMeTheCode/find_links.py
|
qianhk/FeiPython
|
c87578d3c04b7345a99fef7390c8ea12c6f2c716
|
[
"Apache-2.0"
] | 15
|
2019-11-18T06:09:50.000Z
|
2022-03-02T02:55:54.000Z
|
PythonTest/ShowMeTheCode/find_links.py
|
qianhk/FeiPython
|
c87578d3c04b7345a99fef7390c8ea12c6f2c716
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
import requests
import pyquery
#https://pythonhosted.org/pyquery/api.html
HttpUserAgent = r"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
def find_links(url):
list = []
html = get_html(url)
doc = pyquery.PyQuery(html)
a_tags = doc.find('a')
for a in a_tags.items():
hrefAttr = a.attr('href')
if isinstance(hrefAttr, str) and len(hrefAttr) > 0:
if hrefAttr.startswith('http'):
list.append(hrefAttr)
elif hrefAttr.startswith('/'):
list.append('' + hrefAttr)
# for item in list:
# print item
imgList = []
imgTags = doc.find('img').items()
for imgTag in imgTags:
imgSrc = imgTag.attr['src']
if isinstance(imgSrc, str) and len(imgSrc) > 0 and imgSrc.startswith('http'):
imgList.append(imgSrc)
for imgUrl in imgList:
        print(imgUrl)
def get_html(url):
response = requests.get(url, headers={'User-Agent': HttpUserAgent})
return response.text
if __name__ == '__main__':
url = 'http://tieba.baidu.com/p/2166231880'
find_links(url)
| 27.590909
| 140
| 0.621911
|
680db62b98791017aacdb6c127b3c774a57e3464
| 1,996
|
py
|
Python
|
question10.py
|
znalbert/alg_think_mod_3
|
6599109811d41d7ed4f6136367b850699d3c47c2
|
[
"MIT"
] | null | null | null |
question10.py
|
znalbert/alg_think_mod_3
|
6599109811d41d7ed4f6136367b850699d3c47c2
|
[
"MIT"
] | null | null | null |
question10.py
|
znalbert/alg_think_mod_3
|
6599109811d41d7ed4f6136367b850699d3c47c2
|
[
"MIT"
] | null | null | null |
"""
Assignment 3 - Question 1
"""
import matplotlib.pyplot as plt
import alg_project3_viz as viz
import alg_project3_solution as sol
data_table = viz.load_data_table(viz.DATA_111_URL)
hier_cluster_list = sol.make_data_list(data_table)
kmeans_cluster_list = sol.make_data_list(data_table)
def compute_hier_distortions(cluster_list):
""" list -> list
Takes a list of cluster objects and returns the list of distortions as that
    list is further clustered from 20 down to 6 clusters.
"""
distortions = []
for iteration in range(20, 5, -1):
new_list = sol.hierarchical_clustering(cluster_list, iteration)
cluster_list = new_list
distortions.append(sol.compute_distortion(new_list, data_table))
distortions.reverse()
return distortions
def compute_kmeans_distortions(cluster_list):
""" list -> list
Takes a list of cluster objects and iteratively clusters the data further,
while calculating the distortion at each iteration. Returns a list of
distortion values.
"""
distortions = []
for iteration in range(6, 21):
new_list = sol.kmeans_clustering(cluster_list, iteration, 5)
distortions.append(sol.compute_distortion(new_list, data_table))
return distortions
def plot_distortions(hierarchical_data, kmeans_data):
"""
Plot an example with two curves with legends
"""
y_values = range(6, 21)
plt.plot(y_values, hierarchical_data, '-b', label='hierarchical_clustering')
plt.plot(y_values, kmeans_data, '-r', label='kmeans_clustering')
plt.legend(loc='upper right')
plt.ylabel('Distortion')
plt.xlabel('Number of Clusters')
plt.grid(True)
plt.title('Comparison of Function Distortions for 111 Data Points\nPython Desktop Environment\n')
plt.show()
hier_distortions = compute_hier_distortions(hier_cluster_list)
kmeans_distortions = compute_kmeans_distortions(kmeans_cluster_list)
plot_distortions(hier_distortions, kmeans_distortions)
| 29.791045
| 101
| 0.73998
|
7ca2110bb65843d30c5247317ffbb1f948e3acc9
| 3,636
|
py
|
Python
|
lte/gateway/python/magma/mobilityd/uplink_gw.py
|
gurrapualt/magma
|
13e05788fa6c40293a58b6e03cfb394bb79fa98f
|
[
"BSD-3-Clause"
] | null | null | null |
lte/gateway/python/magma/mobilityd/uplink_gw.py
|
gurrapualt/magma
|
13e05788fa6c40293a58b6e03cfb394bb79fa98f
|
[
"BSD-3-Clause"
] | 112
|
2020-09-03T06:41:43.000Z
|
2022-03-31T12:07:08.000Z
|
lte/gateway/python/magma/mobilityd/uplink_gw.py
|
gurrapualt/magma
|
13e05788fa6c40293a58b6e03cfb394bb79fa98f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import logging
import netifaces
from typing import MutableMapping, Optional, List
from lte.protos.mobilityd_pb2 import GWInfo, IPAddress
NO_VLAN = "NO_VLAN"
def _get_vlan_key(vlan: Optional[str]) -> str:
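    # Normalize missing/zero VLAN ids to the NO_VLAN sentinel and reject ids outside 0-4095.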
if vlan is None or vlan == '' or vlan == NO_VLAN or vlan == "0":
return NO_VLAN
if int(vlan) < 0 or int(vlan) > 4095:
raise InvalidVlanId("invalid vlan: " + vlan)
return vlan
# TODO: move helper class to separate directory.
class UplinkGatewayInfo:
def __init__(self, gw_info_map: MutableMapping[str, GWInfo]):
"""
        This maintains up-to-date information about the upstream GW.
Args:
gw_info_map: map to store GW info.
"""
self._backing_map = gw_info_map
# TODO: change vlan_id type to int
def get_gw_ip(self, vlan_id: Optional[str] = "") -> Optional[str]:
vlan_key = _get_vlan_key(vlan_id)
if vlan_key in self._backing_map:
gw_info = self._backing_map.get(vlan_key)
ip = ipaddress.ip_address(gw_info.ip.address)
return str(ip)
def read_default_gw(self):
gws = netifaces.gateways()
logging.info("Using GW info: %s", gws)
if gws is not None:
default_gw = gws['default']
if default_gw is not None and \
default_gw[netifaces.AF_INET] is not None:
self.update_ip(default_gw[netifaces.AF_INET][0])
def update_ip(self, ip: str, vlan_id: Optional[str] = ""):
vlan_key = _get_vlan_key(vlan_id)
logging.info("GW IP[%s]: %s" % (vlan_key, ip))
ip_addr = ipaddress.ip_address(ip)
gw_ip = IPAddress(version=IPAddress.IPV4,
address=ip_addr.packed)
# keep mac address same if its same GW IP
if vlan_key in self._backing_map:
gw_info = self._backing_map[vlan_key]
if gw_info and gw_info.ip == gw_ip:
logging.debug("IP update: no change %s", ip)
return
updated_info = GWInfo(ip=gw_ip, mac="", vlan=vlan_id)
self._backing_map[vlan_key] = updated_info
def get_gw_mac(self, vlan_id: Optional[str] = "") -> Optional[str]:
vlan_key = _get_vlan_key(vlan_id)
if vlan_key in self._backing_map:
return self._backing_map.get(vlan_key).mac
else:
return None
def update_mac(self, ip: str, mac: Optional[str], vlan_id: Optional[str] = ""):
vlan_key = _get_vlan_key(vlan_id)
# TODO: enhance check for MAC address sanity.
if mac is None or ':' not in mac:
logging.error("Incorrect mac format: %s for IP %s (vlan_key %s)",
mac, ip, vlan_id)
return
ip_addr = ipaddress.ip_address(ip)
gw_ip = IPAddress(version=IPAddress.IPV4,
address=ip_addr.packed)
updated_info = GWInfo(ip=gw_ip, mac=mac, vlan=vlan_id)
self._backing_map[vlan_key] = updated_info
def get_all_router_ips(self) -> List[GWInfo]:
return list(self._backing_map.values())
class InvalidVlanId(Exception):
pass
| 34.628571
| 83
| 0.637789
|
769bdf5188fea123080dd0131178d12188cc60fb
| 4,507
|
py
|
Python
|
gameanalysis/script/rest.py
|
egtaonline/GameAnalysis
|
32be1a6b9f616e794362639367ad64360f3e118f
|
[
"Apache-2.0"
] | 7
|
2017-05-17T10:40:45.000Z
|
2021-10-30T12:20:24.000Z
|
gameanalysis/script/rest.py
|
egtaonline/GameAnalysis
|
32be1a6b9f616e794362639367ad64360f3e118f
|
[
"Apache-2.0"
] | 1
|
2015-05-04T20:13:15.000Z
|
2015-05-04T20:13:15.000Z
|
gameanalysis/script/rest.py
|
egtaonline/GameAnalysis
|
32be1a6b9f616e794362639367ad64360f3e118f
|
[
"Apache-2.0"
] | 3
|
2015-05-04T19:58:32.000Z
|
2016-05-17T14:08:28.000Z
|
"""extract and find restrictions"""
import argparse
import json
import sys
import numpy as np
from gameanalysis import gamereader
from gameanalysis import restrict
from gameanalysis import utils
def add_parser(subparsers):
"""Add restriction parser"""
parser = subparsers.add_parser(
"restriction",
aliases=["rest"],
help="""Compute and select
restrictions""",
description="""Extract restricted game and optionally
detects all complete restrictions. All restriction specifications will
be concatenated, resulting in a list of restrictions.""",
)
parser.add_argument(
"--input",
"-i",
metavar="<input-file>",
default=sys.stdin,
type=argparse.FileType("r"),
help="""Input file for script. (default:
stdin)""",
)
parser.add_argument(
"--output",
"-o",
metavar="<output-file>",
default=sys.stdout,
type=argparse.FileType("w"),
help="""Output file for script. (default:
stdout)""",
)
parser.add_argument(
"--no-extract",
"-n",
action="store_true",
help="""Don't extract
restricted games, just print the specifications of the restricted
strategy set. This is mainly only useful with the detect option.""",
)
sub_group = parser.add_argument_group(
title="restriction specifications",
description="""These are all of the
ways to specify restricted games to extract. All of these
specifications are concatenated together before being output.""",
)
sub_group.add_argument(
"--detect",
"-d",
action="store_true",
help="""Run clique finding to
detect maximally complete restrictions.""",
)
sub_group.add_argument(
"--restriction-file",
"-f",
metavar="<file>",
default=[],
type=argparse.FileType("r"),
action="append",
help="""A file that
contains a list of restrictions. A restriction is simply a mapping of
roles to strategies i.e. "{r: ["s1", "s2"]}". This is the same format
that can be output by this script with the no-extract option. This can
be specified multiple times.""",
)
sub_group.add_argument(
"--text-spec",
"-t",
metavar="<role:strat,...;...>",
action="append",
default=[],
help="""Specify a restrictions as a string. To specify the
restriction where role0 has strategies strat0 and strat2 and role1 has
strategy strat1 enter "role0:strat0,strat2;role1:strat1".""",
)
sub_group.add_argument(
"--index-spec",
"-s",
metavar="<i,j,...>",
action="append",
default=[],
help="""Specify a restriction with a list of strategy indices. A
strategy is specified by its zero-indexed position in a list of all
strategies sorted alphabetically by role and sub-sorted alphabetically
by strategy name. For example if role1 has strategies s1, s2, and s3
and role2 has strategies s4 and s5, then the restriction with all but
the last strategy for each role is extracted by "0,1,3". This can be
specified multiple times for several restrictions.""",
)
return parser
def parse_index_spec(game, spec):
"""Parse restriction index specification"""
rest = np.zeros(game.num_strats, bool)
rest[list(map(int, spec.split(",")))] = True
utils.check(
game.is_restriction(rest), '"{}" does not define a valid restriction', spec
)
return rest
def main(args):
"""Entry point for restriction cli"""
game = gamereader.load(args.input)
# Collect all restrictions
restrictions = []
if args.detect:
restrictions.extend(restrict.maximal_restrictions(game))
for rest_file in args.restriction_file:
restrictions.extend(
game.restriction_from_json(spec) for spec in json.load(rest_file)
)
restrictions.extend(game.restriction_from_repr(spec) for spec in args.text_spec)
restrictions.extend(parse_index_spec(game, spec) for spec in args.index_spec)
if args.no_extract:
json.dump(
[game.restriction_to_json(rest) for rest in restrictions], args.output
)
else:
json.dump([game.restrict(rest).to_json() for rest in restrictions], args.output)
args.output.write("\n")
| 32.65942
| 88
| 0.625693
|
48afc02ed363689d819554ee3836dca95238e9cf
| 9,043
|
py
|
Python
|
test/funcional/test_framework/authproxy.py
|
odavila466/Kron-Project
|
8a915e6287ac6d21ac0a32ff69f6f04e260bd1f5
|
[
"MIT"
] | 3
|
2021-05-18T05:11:56.000Z
|
2021-12-05T11:25:38.000Z
|
test/funcional/test_framework/authproxy.py
|
BaymaxValero/Kron-Project
|
e56e596ee36e4b6949ebb75a01867c08481139e2
|
[
"MIT"
] | 1
|
2021-05-13T19:01:05.000Z
|
2021-05-13T19:01:57.000Z
|
test/funcional/test_framework/authproxy.py
|
BaymaxValero/Kron-Project
|
e56e596ee36e4b6949ebb75a01867c08481139e2
|
[
"MIT"
] | 1
|
2021-05-18T05:11:58.000Z
|
2021-05-18T05:11:58.000Z
|
# Copyright (c) 2011 Jeff Garzik
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
# Copyright (c) 2007 Jan-Klaas Kollhof
# Copyright (c) 2017-2020 The Kron Core developers
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
HTTP proxy for opening an RPC connection to Krond.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
import base64
import decimal
from http import HTTPStatus
import http.client
import json
import logging
import os
import socket
import time
import urllib.parse
HTTP_TIMEOUT = 30
USER_AGENT = "AuthServiceProxy/0.1"
log = logging.getLogger("KronRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error, http_status=None):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
super().__init__(errmsg)
self.error = rpc_error
self.http_status = http_status
def encode_decimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy:
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urllib.parse.urlparse(service_url)
user = None if self.__url.username is None else self.__url.username.encode('utf8')
passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
auth_pair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(auth_pair)
self.timeout = timeout
self._set_conn(connection)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, post_data):
"""
Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
"""
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
if os.name == 'nt':
# Windows somehow does not like to re-use connections
# TODO: Find out why the connection would disconnect occasionally and make it reusable on Windows
self._set_conn()
try:
self.__conn.request(method, path, post_data, headers)
return self._get_response()
except http.client.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, post_data, headers)
print("~~~~~~~~~~~~~~~~~ Bad Status Exception ~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(e)
return self._get_response()
else:
raise
except (BrokenPipeError, ConnectionResetError) as e:
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, post_data, headers)
print("~~~~~~~~~~~~~~~~~ Broken Pipe or Connection Reset Exception ~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(e)
return self._get_response()
def get_request(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-{}-> {} {}".format(AuthServiceProxy.__id_count, self._service_name, json.dumps(args or argsn, default=encode_decimal, ensure_ascii=self.ensure_ascii),))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
return {'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}
def __call__(self, *args, **argsn):
post_data = json.dumps(self.get_request(*args, **argsn), default=encode_decimal, ensure_ascii=self.ensure_ascii)
response, status = self._request('POST', self.__url.path, post_data.encode('utf-8'))
if response['error'] is not None:
log.debug("---------------------------<authproxy>---------------------------")
log.debug("Call failed! postdata:")
log.debug(post_data)
log.debug("---------------------------</authproxy>--------------------------")
raise JSONRPCException(response['error'], status)
elif 'result' not in response:
raise JSONRPCException({'code': -343, 'message': 'missing JSON-RPC result'}, status)
elif status != HTTPStatus.OK:
raise JSONRPCException({'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
else:
return response['result']
def batch(self, rpc_call_list):
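        # Send a list of JSON-RPC calls as a single batch request; entries can be built with get_request().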
postdata = json.dumps(list(rpc_call_list), default=encode_decimal, ensure_ascii=self.ensure_ascii)
log.debug("--> " + postdata)
response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if status != HTTPStatus.OK:
raise JSONRPCException({'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
return response
def _get_response(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name, self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException({'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}, http_response.status)
response_data = http_response.read().decode('utf8')
response = json.loads(response_data, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=encode_decimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s" % (elapsed, response_data))
return response, http_response.status
def __truediv__(self, relative_uri):
return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
def _set_conn(self, connection=None):
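        # Reuse a caller-supplied connection if given, otherwise open an HTTP(S) connection to the RPC endpoint.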
port = 80 if self.__url.port is None else self.__url.port
if connection:
self.__conn = connection
self.timeout = connection.timeout
elif self.__url.scheme == 'https':
self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=self.timeout)
else:
self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=self.timeout)
| 44.767327
| 183
| 0.637289
|
a50bfd6a90ca0e2db9ef3e6505647e87b8f1c989
| 382
|
py
|
Python
|
hr/models.py
|
alissonperez/employee-manager
|
3a131cd5010eb0295a74e1ab4cd52aa0fbb49690
|
[
"MIT"
] | null | null | null |
hr/models.py
|
alissonperez/employee-manager
|
3a131cd5010eb0295a74e1ab4cd52aa0fbb49690
|
[
"MIT"
] | null | null | null |
hr/models.py
|
alissonperez/employee-manager
|
3a131cd5010eb0295a74e1ab4cd52aa0fbb49690
|
[
"MIT"
] | null | null | null |
from django.db import models
class Department(models.Model):
name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.name
class Employee(models.Model):
name = models.CharField(max_length=50)
email = models.EmailField(unique=True)
department = models.ForeignKey(Department)
def __str__(self):
return self.name
| 21.222222
| 55
| 0.704188
|
c03948594fc99f348d44aafb8c6792f38dba2164
| 12,375
|
py
|
Python
|
vizic/control_widgets.py
|
ywx649999311/Vizic
|
c408a8d60afcf5ac193d9f8de1e52a9ad28b349d
|
[
"MIT"
] | 21
|
2017-01-06T10:59:16.000Z
|
2020-10-30T22:28:30.000Z
|
vizic/control_widgets.py
|
ywx649999311/Vizic
|
c408a8d60afcf5ac193d9f8de1e52a9ad28b349d
|
[
"MIT"
] | 10
|
2016-12-08T03:15:37.000Z
|
2017-07-10T09:17:31.000Z
|
vizic/control_widgets.py
|
ywx649999311/Vizic
|
c408a8d60afcf5ac193d9f8de1e52a9ad28b349d
|
[
"MIT"
] | 4
|
2017-01-06T08:53:39.000Z
|
2020-10-30T22:28:33.000Z
|
from .astroleaflet import *
# class NotebookUrl(Widget):
# """Widget to get Jupyter server url.
#
#     The actual url of the Jupyter server is assigned to class variable ``nb_url`` after the widget is rendered.
# """
# _view_name = Unicode('NotebookUrlView').tag(sync=True)
# _view_module = Unicode('jupyter-vizic').tag(sync=True)
# nb_url = Unicode().tag(sync=True)
class LayerColorPicker(ColorPicker):
"""Layer colorpicker widget.
Attributes:
layer: The layer of which the color is being controlled by the picker.
"""
_view_name = Unicode('LayerColorPickerView').tag(sync=True)
_view_module = Unicode('jupyter-vizic').tag(sync=True)
layer = Instance(Layer)
def __init__(self, **kwargs):
super(LayerColorPicker, self).__init__(**kwargs)
self.value = self.layer.color
self.link(self.layer)
if self.concise:
self.layout.width = '30px'
def unlink(self):
"""Unlink colorpicker and layer."""
self.dlink.unlink()
def link(self, layer):
"""Link the colorpicker to the layer.
        Uses a directional link from the value attribute to the color attribute of
the target layer object.
"""
self.layer = layer
self.dlink = dlink((self, 'value'), (self.layer, 'color'))
class PopupDis(DOMWidget):
"""Popup display Widget
Attributes:
layer: The base tilelayer that the widget is monitoring.
"""
_view_name = Unicode('PopupDisView').tag(sync=True)
_view_module = Unicode('jupyter-vizic').tag(sync=True)
_object_info = Dict().tag(sync=True)
layer = Instance(GridLayer)
data = Instance(pd.Series, allow_none=True)
def __init__(self, **kwargs):
"""Initiate the widget object create a direction link
The link is from the obj_catalog attribute of the layer object to the
data attribute in this widget.
"""
super(PopupDis, self).__init__(**kwargs)
# self.layout.width = '100%'
self.dlink = dlink((self.layer, 'obj_catalog'), (self, 'data'))
@observe('data')
def _update_data(self, change):
"""Observe changes in ``data`` and update at front-end."""
old = change['old']
new = change['new']
if old is Undefined:
return
if new is not None and not new.equals(old):
self._object_info = new.to_dict()
class HomeButton(Button):
"""Home button Widget
Reset the map to initial zoom level and center.
"""
_view_name = Unicode('HomeButtonView').tag(sync=True)
_view_module = Unicode('jupyter-vizic').tag(sync=True)
_map = Instance(AstroMap, allow_none=True)
def __init__(self, map, **kwargs):
"""
Args:
map: An AstroMap object, which the widget is intended to control.
            **kwargs: Arbitrary keyword arguments for ``Button``.
"""
super(HomeButton, self).__init__(**kwargs)
self._map = map
self.layout = Layout(height='30px', width='30px')
self.on_click(self.handle_click)
def handle_click(self, b):
"""Reset the map"""
if self._map is not None:
self._map.center_map()
class CFDropdown(Dropdown):
"""Dropdown menu for selecting colormapping field."""
_active = Bool(False)
def __init__(self, layer, **kwargs):
"""Extends ``Dropdown`` class from ``ipywidgets``.
Args:
layer: The tileLayer that the menu is associated with.
            **kwargs: Arbitrary keyword arguments for ``Dropdown``.
"""
super(CFDropdown, self).__init__(**kwargs)
self._layer = layer
self.description = 'Property: '
self.layout.width = '100%'
self.options = list(self._layer.get_fields())
dlink((self._layer, 'custom_c'), (self, '_active'))
def link(self):
"""Link the value of the dropdown to ``c_field`` in tileLayer"""
# either dlink or use @validate on python side instead
self.link = dlink((self, 'value'), (self._layer, 'c_field'))
def unlink(self):
"""Unlink for provided tileLayer"""
self.link.unlink()
self._layer.c_field = ''
del self.link
@observe('_active')
def update_active(self, change):
"""Update the active status of the menu."""
if change['new'] is False:
self.unlink()
elif change['new'] is True:
self.link()
class ColorMap(Dropdown):
"""Dropdown menu for selecting colormapping color space."""
_layer = Instance(GridLayer)
colorSpaces = {
'Spectral': 1,
'BrBG': 2,
'PRGn': 3,
'PiYG': 4,
'PuOr': 5,
'RdBu': 6,
'RdYlBu': 7,
'RdYlGn':8,
'Blues':9,
'Greens':10,
'Oranges':11,
'Purples':12,
'Reds':13,
'BuGn':14,
'BuPu':15,
'GnBu':16,
'OrRd':17,
'PuBuGn':18,
'PuBu':19,
'PuRd':20,
'RdPu':21,
'YlGnBu':22,
'YlGn':23,
'YlOrBr':24,
'YlOrRd':25
}
def __init__(self, gridlayer, **kwargs):
"""Extends ``Dropdown`` class from ``ipywidgets``.
Args:
            gridlayer: The base tileLayer the widget is associated with.
            **kwargs: Arbitrary keyword arguments for ``Dropdown``.
"""
super(ColorMap, self).__init__(**kwargs)
self._layer = gridlayer
self.description = 'ColorMap: '
self.layout.width = '100%'
self.options = self.colorSpaces
self.value = self._layer.c_map
dlink((self,'value'), (self._layer, 'c_map'))
class FilterSlider(FloatRangeSlider):
"""RangeSlider widget for filering displayed objects.
Ranges for selected field are automatically displayed on the slider. Move
the bars to filter out unwanted objects.
Attributes:
readout_format(str): The format of the float numbers, which show the
value range of a particular property, on the slider.
"""
readout_format = Unicode('.3f').tag(sync=True)
def __init__(self, layer, field, **kwargs):
"""Extends ``FloatRangeSlider`` from ``ipywidgets``.
Args:
layer: A gridLayer instance.
field(str): The property field of the catalog that the slider will
use for filtering.
**kwargs: Arbitrary keyword arguments for ``FloatRangeSlider``.
"""
super(FilterSlider, self).__init__(**kwargs)
self._layer = layer
self.property = field.upper()
self.min, self.max = (-1e6, 1e6)
self.min, self.max = self._layer.get_min_max(field)
self.value = [self.min, self.max]
self.step = 0.0001
self.layout.width = '100%'
# self.link()
def _change_field(self, field):
self.property = field.upper()
self.min, self.max = (-1e6, 1e6)
self.min, self.max = self._layer.get_min_max(field)
self.value = [self.min, self.max]
def link(self):
"""Link slider values with the ``filter_range`` from tileLayer."""
self._layer.filter_property = self.property
self.link = dlink((self, 'value'), (self._layer, 'filter_range'))
def unlink(self):
"""Unlink from the provided tileLayer."""
self.link.unlink()
del self.link
self._layer.filter_property = ''
class FilterWidget(Box):
"""A Dropdown menu and a rangeSlider wrapped in a box layout.
Select the field for filtering objects and perform the filter action in one
widget. The map will reset when a new field is chosen.
"""
filter_field = Unicode()
_active = Bool(False)
@default('layout')
def _default_layout(self):
return Layout(display='flex', flex_flow='column',align_items='stretch', width='100%')
def __init__(self, layer, *pargs, **kwargs):
"""Extends ``Box`` from ``ipywidgets``.
Two links are created: 1) link the dropDown menu with the ``filter_field``
attribute from the tileLayer. 2) link the ``filter_obj`` attribute from
the tileLayer to the ``_active`` status attribute in this widget.
Args:
layer: A gridLayer instance.
*args: Variable length argument list for ``Box``.
**kwargs: Arbitrary keyword arguments for ``Box``.
"""
super(FilterWidget, self).__init__(*pargs, **kwargs)
self._layer = layer
self.dropDown = Dropdown(options=list(self._layer.get_fields()), width='100%')
self.slider = FilterSlider(layer, self.dropDown.value)
self.children = (self.dropDown, self.slider)
dlink((self.dropDown, 'value'),(self, 'filter_field'))
dlink((self._layer,'filter_obj'), (self, '_active'))
def link(self):
"""Link the slider with the provided tileLayer."""
self.slider.link()
def unlink(self):
"""Unlink slider from the tileLayer."""
self.slider.unlink()
@observe('filter_field')
def update_field(self, change):
"""Observe changes in the dropDown menu and updates"""
if change['new'] != '':
self._layer.filter_property = change['new']
self.slider._change_field(change['new'])
@observe('_active')
def update_active(self, change):
"""Unlink this widget from layer if ``_active`` changes to False."""
if change['new'] is False:
self.unlink()
class FilterBox(Box):
"""A box layout wrapping a FilterSlider object."""
@default('layout')
def _default_layout(self):
return Layout(display='flex', align_items='stretch', justify_content='space_between')
def __init__(self, layer, field, *pargs, **kwargs):
"""Extends ``Box`` from ``ipywidgets``.
Args:
layer: A gridLayer instance.
field(str): The property field of the catalog that the slider will
use for filtering.
*args: Variable length argument list for ``Box``.
**kwargs: Arbitrary keyword arguments for ``Box``.
"""
super(FilterBox, self).__init__(*pargs, **kwargs)
self.label = Label(field.upper())
self.label.padding = '7px 2px 2px 2px'
self.slider = FilterSlider(layer, field)
self.children = (self.label, self.slider)
def link(self):
self.slider.link()
def unlink(self):
self.slider.unlink()
class SelectionTrig(ToggleButton):
"""A control widget to trigger lasso selection"""
_view_name = Unicode('SelectionButtonView').tag(sync=True)
_view_module = Unicode('jupyter-vizic').tag(sync=True)
_map = Instance(AstroMap, allow_none=True)
def __init__(self, map, **kwargs):
"""Extends ``ToggleButton`` from ``ipywidgets``.
Args:
map: An AstroMap map object that the trigger widget is associated
with.
            **kwargs: Arbitrary keyword arguments for ``ToggleButton``.
"""
super(SelectionTrig, self).__init__(**kwargs)
self._map = map
self.layout = Layout(height='30px', width='30px')
def link(self):
"""Link the trigger to target AstroMap object"""
self.link = link((self, 'value'), (self._map, 'selection'))
def unlink(self):
"""Unlink from the provided AstroMap"""
self.link.unlink()
del self.link
class GetDataButton(Button):
"""Getting selected data.
Clicking this button to query the database for data selected using the
lasso-like selection tool.
"""
_view_name = Unicode('GetDataButtonView').tag(sync=True)
_view_module = Unicode('jupyter-vizic').tag(sync=True)
_layer = Instance(GridLayer)
def __init__(self, layer, **kwargs):
"""Extends ``Button`` from ``ipywidgets``.
Args:
            layer: The tileLayer that the button is associated with.
"""
super(GetDataButton, self).__init__(**kwargs)
self._layer = layer
self.layout = Layout(height='30px', width='30px')
self.on_click(self.handle_click)
def handle_click(self, b):
if self._layer._map is not None and self._layer._map.selection:
self.disabled = True
self._layer._query_selection()
self.disabled = False
| 32.651715
| 117
| 0.604364
|
0234674b0cea8862a0acc7a1ab3178b28def3347
| 1,269
|
py
|
Python
|
algorithms/matrix/spiral_traversal.py
|
GuyHassan/algo
|
26d58aef1d87c33b4390b6f7ddeb93c3c124db39
|
[
"MIT"
] | null | null | null |
algorithms/matrix/spiral_traversal.py
|
GuyHassan/algo
|
26d58aef1d87c33b4390b6f7ddeb93c3c124db39
|
[
"MIT"
] | null | null | null |
algorithms/matrix/spiral_traversal.py
|
GuyHassan/algo
|
26d58aef1d87c33b4390b6f7ddeb93c3c124db39
|
[
"MIT"
] | null | null | null |
"""
Given a matrix of m x n elements (m rows, n columns),
return all elements of the matrix in spiral order.
For example,
Given the following matrix:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
You should return [1,2,3,6,9,8,7,4,5].
"""
def spiral_traversal(matrix):
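    # Walk the boundary of the matrix layer by layer: top row, right column,
    # then (via helpFunc1) bottom row and left column.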
res = []
if len(matrix) == 0:
return res
row_begin = 0
row_end = len(matrix) - 1
col_begin = 0
col_end = len(matrix[0]) - 1
while row_begin <= row_end and col_begin <= col_end:
for i in range(col_begin, col_end+1):
res.append(matrix[row_begin][i])
row_begin += 1
for i in range(row_begin, row_end+1):
res.append(matrix[i][col_end])
col_end -= 1
        row_end, col_begin = helpFunc1(row_begin, row_end, col_end, col_begin, res, matrix)
    return res
def helpFunc1(row_begin, row_end, col_end, col_begin, res, matrix):
    # Traverse the bottom row and the left column, then return the updated
    # bounds so the caller's loop shrinks the remaining window correctly.
    if row_begin <= row_end:
        for i in range(col_end, col_begin - 1, -1):
            res.append(matrix[row_end][i])
        row_end -= 1
    if col_begin <= col_end:
        for i in range(row_end, row_begin - 1, -1):
            res.append(matrix[i][col_begin])
        col_begin += 1
    return row_end, col_begin
if __name__ == "__main__":
mat = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
print(spiral_traversal(mat))
| 24.403846
| 65
| 0.580772
|
2817fdfb6ec0d8831de8fc4a29632e1b12d996ae
| 1,560
|
py
|
Python
|
test/test_create_automatic_tokens_forwarding_response_item_token_data_bitcoin_omni_token.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
test/test_create_automatic_tokens_forwarding_response_item_token_data_bitcoin_omni_token.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
test/test_create_automatic_tokens_forwarding_response_item_token_data_bitcoin_omni_token.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | 1
|
2021-07-21T03:35:18.000Z
|
2021-07-21T03:35:18.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.create_automatic_tokens_forwarding_response_item_token_data_bitcoin_omni_token import CreateAutomaticTokensForwardingResponseItemTokenDataBitcoinOmniToken
class TestCreateAutomaticTokensForwardingResponseItemTokenDataBitcoinOmniToken(unittest.TestCase):
"""CreateAutomaticTokensForwardingResponseItemTokenDataBitcoinOmniToken unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCreateAutomaticTokensForwardingResponseItemTokenDataBitcoinOmniToken(self):
"""Test CreateAutomaticTokensForwardingResponseItemTokenDataBitcoinOmniToken"""
# FIXME: construct object with mandatory attributes with example values
# model = CreateAutomaticTokensForwardingResponseItemTokenDataBitcoinOmniToken() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 42.162162
| 484
| 0.802564
|
7f441f7e252f37c6925ba70b61898a4ccf62853e
| 2,004
|
py
|
Python
|
rss_feed_parser.py
|
nalindas9/rss-feed-parser
|
9d39f6a8be682e925d7efb7e3fd126bfb1493db6
|
[
"MIT"
] | null | null | null |
rss_feed_parser.py
|
nalindas9/rss-feed-parser
|
9d39f6a8be682e925d7efb7e3fd126bfb1493db6
|
[
"MIT"
] | null | null | null |
rss_feed_parser.py
|
nalindas9/rss-feed-parser
|
9d39f6a8be682e925d7efb7e3fd126bfb1493db6
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
import pandas as pd
class RSSFeedParser:
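    """Fetch an RSS feed, extract its <item> entries, and export them to a CSV file."""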
def __init__(self) -> None:
pass
def getResponse(self, url: str) -> requests.Response:
"""
Get the response from the url
"""
try:
resp = requests.get(url)
return resp
        except Exception as e:
            # resp may be unbound if the request itself failed, so only report the exception
            print('Exception: {}'.format(e))
            return None
def getNewsItems(self, resp: requests.Response) -> list:
"""
Get the news items from the response
"""
try:
soup = BeautifulSoup(resp.content, features='xml')
# Find all the <item> tags
items = soup.find_all('item')
# Create a list of dictionaries to store the news items
news_items = list()
# Scrape the HTML tags for each news item
for item in items:
news_item = dict()
news_item['title'] = item.title.text
news_item['link'] = item.link.text
news_item['pubDate'] = item.pubDate.text
news_items.append(news_item)
return news_items
except Exception as e:
print('Exception: {}'.format(e))
return None
def getDataFrame(self, news_items: list) -> pd.DataFrame:
"""
Create a dataframe from the news items
"""
try:
df = pd.DataFrame(news_items, columns=['title', 'link', 'pubDate'])
return df
except Exception as e:
print('Exception: {}'.format(e))
return None
def saveDataFrame(self, df: pd.DataFrame, filename: str) -> None:
"""
Save the dataframe to a csv file
"""
try:
df.to_csv(filename, index=False, encoding='utf-8')
except Exception as e:
print('Exception: {}'.format(e))
return None
| 31.3125
| 79
| 0.532435
|
4fff8c3aa947c0a4712a0d5a1d67d6e149af0595
| 3,109
|
py
|
Python
|
sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/aio/_configuration.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/aio/_configuration.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/aio/_configuration.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ArtifactsClientConfiguration(Configuration):
"""Configuration for ArtifactsClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param endpoint: The workspace development endpoint, for example https://myworkspace.dev.azuresynapse.net.
:type endpoint: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
endpoint: str,
**kwargs: Any
) -> None:
super(ArtifactsClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if endpoint is None:
raise ValueError("Parameter 'endpoint' must not be None.")
self.credential = credential
self.endpoint = endpoint
self.credential_scopes = kwargs.pop('credential_scopes', ['https://dev.azuresynapse.net/.default'])
kwargs.setdefault('sdk_moniker', 'synapse-artifacts/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 47.106061
| 134
| 0.692827
|
28b4da5315329d7ff00f1bd1b18453a7482cdb46
| 10,037
|
py
|
Python
|
src/zivid_manager.py
|
SeungBack/assembly_camera_calibrator
|
294251b9abdf8c1547e446c3661943eb5df2aed3
|
[
"MIT"
] | 2
|
2020-07-07T12:28:09.000Z
|
2020-09-22T11:13:22.000Z
|
src/zivid_manager.py
|
SeungBack/assembly_camera_manager
|
294251b9abdf8c1547e446c3661943eb5df2aed3
|
[
"MIT"
] | null | null | null |
src/zivid_manager.py
|
SeungBack/assembly_camera_manager
|
294251b9abdf8c1547e446c3661943eb5df2aed3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
import rosnode
from zivid_camera.srv import *
from std_msgs.msg import String
from sensor_msgs.msg import PointCloud2, Image
import numpy as np
from fiducial_msgs.msg import FiducialTransformArray
from assembly_camera_manager.srv import ExtrinsicCalibrate
import tf
from tf import transformations as t
import tf2_ros
import geometry_msgs
from open3d_ros_helper import open3d_ros_helper as orh
from assembly_camera_manager.srv import GetCameraPoseSingleMarker, GetCameraPoseMultipleMarker, SetCameraPose
import yaml
import os
import json
import PyKDL
# json_message_converter comes from the rospy_message_converter package and is used
# by set_camera_pose; PyKDL handles the quaternion math in the marker-averaging code
from rospy_message_converter import json_message_converter
class ZividManager:
def __init__(self):
rospy.init_node("zivid_manager", anonymous=True)
# params
self.camera_name = rospy.get_param('~camera_name')
self.capture_time = rospy.get_param('~capture_time')
with open(rospy.get_param('~world_map')) as f:
self.world_map = yaml.load(f, Loader=yaml.FullLoader)
# services
ca_suggest_settings_service = "/zivid_camera/capture_assistant/suggest_settings"
rospy.wait_for_service(ca_suggest_settings_service, 30.0)
self.capture_assistant_service = rospy.ServiceProxy(
ca_suggest_settings_service, CaptureAssistantSuggestSettings
)
self.capture_service = rospy.ServiceProxy("/zivid_camera/capture", Capture)
getcamerapose_singlemarker_srv = rospy.Service('/{}/get_camera_pose_single_marker'
.format(self.camera_name), GetCameraPoseSingleMarker, self.get_camera_pose_from_single_marker)
getcamerapose_multiplemarker_srv = rospy.Service('/{}/get_camera_pose_multiple_marker'
.format(self.camera_name), GetCameraPoseMultipleMarker, self.get_camera_pose_from_multiple_marker)
setcamerapose_srv = rospy.Service('/{}/set_camera_pose'
.format(self.camera_name), SetCameraPose, self.set_camera_pose)
self.static_aruco_tfs = []
self.static_world_tfs = []
self.br = tf2_ros.StaticTransformBroadcaster()
self.tf_buffer = tf2_ros.Buffer(rospy.Duration(1.0))
self.listener = tf2_ros.TransformListener(self.tf_buffer)
rospy.loginfo("Starting zivid_manager.py for {}".format(self.camera_name))
def capture_assistant_suggest_settings(self):
max_capture_time = rospy.Duration.from_sec(self.capture_time) # 0.2 to 10s
rospy.loginfo(
"Calling capture assistant service with max capture time = %.2f sec",
max_capture_time.to_sec(),
)
self.capture_assistant_service(
max_capture_time=max_capture_time,
ambient_light_frequency=CaptureAssistantSuggestSettingsRequest.AMBIENT_LIGHT_FREQUENCY_NONE,
)
def get_camera_pose_from_single_marker(self, msg):
target_id = msg.target_id
n_frame = msg.n_frame
img_err_thresh = msg.img_err_thresh
obj_err_thresh = msg.obj_err_thresh
rospy.loginfo("Get camera pose of {} for marker ID {}".format(self.camera_name, target_id))
pos_list = []
quat_list = []
img_err_list = []
obj_err_list = []
n_sucess = 0
# get transforms for n_frame
for n in range(n_frame):
fid_tfs = rospy.wait_for_message('/{}/fiducial_transforms'.format(self.camera_name), FiducialTransformArray)
header_frame_id = fid_tfs.header.frame_id
for i, fid_tf in enumerate(fid_tfs.transforms):
if fid_tf.fiducial_id == target_id:
pos, quat = orh.transform_to_pq(fid_tf.transform)
pos_list.append(pos)
quat_list.append(quat)
img_err_list.append(fid_tf.image_error)
obj_err_list.append(fid_tf.object_error)
n_sucess += 1
if len(pos_list) == 0:
rospy.logwarn("Failed to detect the marker ID {}".format(target_id))
return False
# select the frame with minimum image error
idx = np.argmin(img_err_list)
rospy.loginfo("\t Marker ID {}: n_sucess={}/{}".format(target_id, n_sucess, n_frame))
if img_err_list[idx] > img_err_thresh:
rospy.logwarn("Reject marker ID {} (img err: {:.4f} > {:.4f})".format(target_id, img_err_list[idx], img_err_thresh))
return False
        if obj_err_list[idx] > obj_err_thresh:
rospy.logwarn("Reject marker ID {} (obj err: {:.4f} > {:.4f})".format(target_id, obj_err_list[idx], obj_err_thresh))
return False
else:
rospy.loginfo("\t img err: {:.4f} \t obj err:{:.4f}".format(img_err_list[idx], obj_err_list[idx]))
pos_min = pos_list[idx]
quat_min = quat_list[idx]
source_frame = "{}".format(header_frame_id)
target_frame = "{}_camera_fid_{}".format(self.camera_name, target_id)
static_tf_min = orh.pq_to_transform_stamped(pos_min, quat_min, source_frame, target_frame)
self.static_aruco_tfs.append(static_tf_min)
rospy.loginfo("Publish static tf: {} -> {}_camera_fid_{} from ArUco".format(header_frame_id, self.camera_name, target_id))
# find target marker in world map
target_marker = None
for marker in self.world_map["markers"]:
if marker["id"] == target_id:
target_marker = marker
        if target_marker is None:
            rospy.logwarn("No information in world map for marker ID {}".format(target_id))
            return False
pos = target_marker["position"]
pos = [-p for p in pos]
quat = target_marker["orientation"] # TODO: invert quaternion
source_frame = "{}_camera_fid_{}".format(self.camera_name, target_id)
target_frame = "base"
static_tf_base_to_fid = orh.pq_to_transform_stamped(pos, quat, source_frame, target_frame)
self.static_world_tfs.append(static_tf_base_to_fid)
if msg.publish_worldmap:
rospy.loginfo("Publish static tf:{}_camera_fid_{} -> base from world map ".format(self.camera_name, target_id))
self.br.sendTransform(self.static_aruco_tfs + self.static_world_tfs)
else:
self.br.sendTransform(self.static_aruco_tfs)
return True
def get_camera_pose_from_multiple_marker(self, msg):
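        # Calibrate against each target marker individually, then optionally fuse the
        # per-marker poses by averaging and re-anchoring them to the world map base frame.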
self.static_aruco_tfs = [] # initialize static tf
for target_id in msg.target_ids:
getcamerapose_singlemarker = rospy.ServiceProxy('/{}/get_camera_pose_single_marker'.format(self.camera_name), GetCameraPoseSingleMarker)
is_sucess = getcamerapose_singlemarker(False, target_id, msg.n_frame, msg.img_err_thresh, msg.obj_err_thresh)
if msg.publish_worldmap:
# get average of aruco map
pos_list = []
quat_list = []
for aruco_tf in self.static_aruco_tfs:
pos, quat = orh.transform_stamped_to_pq(aruco_tf)
pos_list.append(pos)
quat_list.append(quat)
pos_aruco_avg, quat_aruco_avg = orh.average_pq(pos_list, quat_list)
# calculate fid 0 to average of aruco map
pos_fid, quat_fid = orh.transform_stamped_to_pq(self.static_aruco_tfs[0])
pos_fid_to_avg = pos_fid - pos_aruco_avg
quat_aruco_avg = PyKDL.Rotation.Quaternion(*quat_aruco_avg)
quat_fid = PyKDL.Rotation.Quaternion(*quat_fid)
quat_fid_to_avg = quat_aruco_avg * quat_fid.Inverse()
# get corresponding tf from world map
pos_list = []
quat_list = []
for world_tf in self.static_world_tfs:
pos, quat = orh.transform_stamped_to_pq(world_tf)
pos_list.append(pos)
quat_list.append(quat)
pos_base_to_avg, quat_base_to_avg = orh.average_pq(pos_list, quat_list)
# calculate average of aruco map to world_base
pos_avg_to_base = [p for p in pos_base_to_avg]
quat_base_to_avg = PyKDL.Rotation.Quaternion(*quat_base_to_avg)
quat_avg_to_base = quat_base_to_avg.Inverse()
# aruco tf #1 to aruco tf average + aruco tf average to base
pos_fid_to_base = [sum(p) for p in zip(pos_fid_to_avg, pos_avg_to_base)]
quat_fid_to_base = quat_fid_to_avg * quat_avg_to_base
quat_fid_to_base = quat_fid_to_base.GetQuaternion()
source_frame = self.static_aruco_tfs[0].child_frame_id
target_frame = "base"
static_tf_fid_to_base = orh.pq_to_transform_stamped(pos_fid_to_base, quat_fid_to_base, source_frame, target_frame)
self.static_world_tfs.append(static_tf_fid_to_base)
self.br.sendTransform(self.static_aruco_tfs + self.static_world_tfs)
self.save_transfrom_as_json("base", "{}_rgb_camera_link".format(self.camera_name))
rospy.loginfo("Finished the camera pose calibration")
return True
def set_camera_pose(self, msg):
with open(os.path.join(self.camera_map, msg.json_file + '.json'), "r") as json_file:
json_str = json.load(json_file)
self.static_aruco_tfs = []
static_tf = json_message_converter.convert_json_to_ros_message('geometry_msgs/TransformStamped', json_str)
static_tf.header.stamp = rospy.Time.now()
self.static_aruco_tfs.append(static_tf)
self.br.sendTransform(self.static_aruco_tfs)
rospy.loginfo("published static tf: {} -> {} from json".format(\
static_tf.header.frame_id, static_tf.child_frame_id))
return True
if __name__ == "__main__":
zivid_manager = ZividManager()
zivid_manager.capture_assistant_suggest_settings()
if rospy.get_param('~repeat'):
rospy.loginfo("Repeat capturing")
while True:
zivid_manager.capture_service()
zivid_manager.br.sendTransform(zivid_manager.static_aruco_tfs + zivid_manager.static_world_tfs)
else:
rospy.spin()
| 47.34434
| 148
| 0.663943
|
6a600bf65e6ce2dc67f4df370f5c3f8250e5021d
| 6,576
|
py
|
Python
|
train/create_parameter_space.py
|
neurosimata/seizy
|
2e18851f90cdda21ad85af3b2224eff17b5689bc
|
[
"Apache-2.0"
] | null | null | null |
train/create_parameter_space.py
|
neurosimata/seizy
|
2e18851f90cdda21ad85af3b2224eff17b5689bc
|
[
"Apache-2.0"
] | null | null | null |
train/create_parameter_space.py
|
neurosimata/seizy
|
2e18851f90cdda21ad85af3b2224eff17b5689bc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
### ------------------------- IMPORTS ------------------------ ###
import os
import numpy as np
import pandas as pd
from train.feature_settings import metrics
### ---------------------------------------------------------- ###
class CreateCatalogue:
"""
Create parameter catalogue from best thresholds for model training.
"""
def __init__(self, csv_dir='train', metrics_csv='threshold_metrics.csv',
output_csv_name='parameter_catalogue.csv'):
"""
Parameters
----------
csv_dir : str, directory to save output save.
metrics_csv : str, name of metrics csv file.
output_csv_name : str, name of output csv file.
Returns
-------
None.
"""
# get paths
        self.metrics_csv_path = os.path.join(csv_dir, metrics_csv)
self.output_csv_path = os.path.join(csv_dir, output_csv_name)
# get thresholds and feature labels
self.thresholds = self.get_thresholds()
self.feature_labels = self.thresholds.columns[1:]
# get feature parameters for method testing
self.thresh_array, self.weights, self.feature_set = self.get_feature_parameters()
# define metrics
self.metrics = metrics
def get_thresholds(self):
"""
Get best thresholds and ranks.
Returns
-------
thresholds : pd.DataFrame
"""
# load metrics
        df = pd.read_csv(self.metrics_csv_path)
# find threshold for minimum cost
df['cost'] = df['false_positive_rate'] - df['percent_detected']
min_cost = df.loc[df.groupby('features').cost.idxmin()]
# combine thresholds with ranks
thresholds = pd.DataFrame(min_cost[['threshold', 'features']])
thresholds['weights'] = len(min_cost['features']) - min_cost['cost'].rank()
thresholds['cost'] = min_cost['cost']
# format dataframe
thresholds = thresholds.T
column_name = 'features'
thresholds.columns = thresholds.loc[column_name]
thresholds = thresholds.drop(thresholds.index[1])
thresholds = thresholds.rename_axis('metrics').reset_index()
return thresholds
def get_feature_parameters(self, n_repeat=500):
"""
Get feature parameter combinations for method testing.
Parameters
----------
n_repeat : int, number of times to add random features per dataset.
Returns
-------
thresh_array : list
weights : list
feature_set : list
"""
# get feature properties
df = self.thresholds
features = np.array(self.feature_labels).reshape(1,-1)
ranks = np.array(df.loc[df['metrics'] == 'weights'])[0][1:]
ranks = ranks.astype(np.double)
optimum_threshold = np.array(df.loc[df['metrics'] == 'threshold'])[0][1:]
optimum_threshold = optimum_threshold.astype(np.double)
# define different threshold levels for testing
thresh_array = []
add_to_optimum_thresh = np.arange(-1, 2.5, .5)
add_to_thresh = np.arange(2, 4, .5)
for opt_threshold, reg_threshold in zip(add_to_optimum_thresh, add_to_thresh):
thresh_array.append(optimum_threshold + opt_threshold)
thresh_array.append(np.ones((optimum_threshold.shape[0])) * reg_threshold)
# define two sets of weights
weights = [np.ones((features.shape[1])), ranks]
# define feature sets
feature_set_or = [np.ones((ranks.shape[0]), dtype=bool),
ranks > np.percentile(ranks, 50),
ranks > np.percentile(ranks, 75)]
n_repeats = n_repeat * np.array([0.01, 0.8, 0.1])
n_repeats = n_repeats.astype(int)
# expand feature dataset by randomly dropping selected features
feature_set = feature_set_or.copy()
for i in range(len(feature_set_or)): # iterate through original dataset
len_temp = sum(feature_set_or[i])
max_drop = int(len_temp - 2)
min_drop = int(len_temp/2)
for ii in range(n_repeats[i]): # iterate n times to drop random features
temp_feature = feature_set_or[i].copy()
drop_n = np.random.randint(min_drop, max_drop)
true_idx = np.where(temp_feature)[0]
idx = np.random.choice(true_idx, drop_n,
replace=False)
temp_feature[idx] = False
feature_set.append(temp_feature)
# get unique feature combinations
feature_set = [np.array(x) for x in set(tuple(x) for x in feature_set)]
return thresh_array, weights, feature_set
def get_parameter_space(self):
"""
        Create a parameter-space dataframe based on thresholds, weights and feature sets.
Returns
-------
df : pandas DataFrame
"""
# get df columns
columns = self.metrics + ['Thresh_' + x for x in self.feature_labels] \
+ ['Weight_' + x for x in self.feature_labels] + ['Enabled_' + x for x in self.feature_labels]
# create df
rows = len(self.thresh_array) * len(self.weights) *len(self.feature_set)
df = pd.DataFrame(data= np.zeros((rows, len(columns))), columns = columns)
# get index
idx2 = len(self.metrics) + len(self.feature_labels)
idx3 = idx2 + len(self.feature_labels)
cntr = 0; # init cntr
for thresh in self.thresh_array:
for weight in self.weights:
for feature in self.feature_set:
df.loc[cntr][len(self.metrics):idx2] = thresh
df.loc[cntr][idx2:idx3] = weight
df.loc[cntr][idx3:] = feature.astype(np.double)
cntr+=1 # update counter
df.to_csv(self.output_csv_path, index=False)
print('--> Parameter catalogue stored in:', self.output_csv_path, '\n')
return df
if __name__ =='__main__':
# get parameter space catalogue
df_catalogue = CreateCatalogue().get_parameter_space()
# df_catalogue.to_csv('template_catalogue.csv', index=False)
| 34.429319
| 120
| 0.558242
|
cb7c2e1f16e5834b0dc32a4e0c18ae24a15b9950
| 4,447
|
py
|
Python
|
openstack_dashboard/contrib/developer/profiler/middleware.py
|
2020human/horizon
|
fab662a19c02318c10c69efced0fac43c28d95f9
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/contrib/developer/profiler/middleware.py
|
2020human/horizon
|
fab662a19c02318c10c69efced0fac43c28d95f9
|
[
"Apache-2.0"
] | 12
|
2022-03-22T07:28:29.000Z
|
2022-03-22T07:29:55.000Z
|
openstack_dashboard/contrib/developer/profiler/middleware.py
|
2020human/horizon
|
fab662a19c02318c10c69efced0fac43c28d95f9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core import exceptions
from django.core.urlresolvers import reverse
from django.utils import safestring
from django.utils.translation import ugettext_lazy as _
from osprofiler import _utils as profiler_utils
from osprofiler import profiler
from osprofiler import web
import six
from horizon import messages
from openstack_dashboard.contrib.developer.profiler import api
_REQUIRED_KEYS = ("base_id", "hmac_key")
_OPTIONAL_KEYS = ("parent_id",)
PROFILER_CONF = getattr(settings, 'OPENSTACK_PROFILER', {})
PROFILER_ENABLED = PROFILER_CONF.get('enabled', False)
class ProfilerClientMiddleware(object):
def __init__(self):
if not PROFILER_ENABLED:
raise exceptions.MiddlewareNotUsed()
super(ProfilerClientMiddleware, self).__init__()
def process_request(self, request):
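        # Only inject trace headers when profiling was requested via the 'profile_page' cookie.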
if 'profile_page' in request.COOKIES:
hmac_key = PROFILER_CONF.get('keys')[0]
profiler.init(hmac_key)
for hdr_key, hdr_value in web.get_trace_id_headers().items():
request.META[hdr_key] = hdr_value
return None
class ProfilerMiddleware(object):
def __init__(self):
self.name = PROFILER_CONF.get('facility_name', 'horizon')
self.hmac_keys = PROFILER_CONF.get('keys', [])
if PROFILER_ENABLED:
api.init_notifier(PROFILER_CONF.get('notifier_connection_string'))
else:
raise exceptions.MiddlewareNotUsed()
@staticmethod
def is_authenticated(request):
return hasattr(request, "user") and request.user.is_authenticated()
def is_enabled(self, request):
return self.is_authenticated(request) and settings.DEBUG
@staticmethod
def _trace_is_valid(trace_info):
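        # A valid trace dict must contain base_id and hmac_key, and may only add parent_id.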
if not isinstance(trace_info, dict):
return False
trace_keys = set(six.iterkeys(trace_info))
if not all(k in trace_keys for k in _REQUIRED_KEYS):
return False
if trace_keys.difference(_REQUIRED_KEYS + _OPTIONAL_KEYS):
return False
return True
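    # Illustrative note (not part of the original middleware): a trace_info dict that
    # passes _trace_is_valid has exactly the required keys plus, optionally, parent_id.
    # The values below are placeholders, not real identifiers:
    #   {"base_id": "<uuid>", "hmac_key": "<key>"}                        -> valid
    #   {"base_id": "<uuid>", "hmac_key": "<key>", "parent_id": "<uuid>"} -> valid
    #   {"base_id": "<uuid>"}                                             -> invalid (missing hmac_key)
    #   {"base_id": "<uuid>", "hmac_key": "<key>", "foo": 1}              -> invalid (unknown key)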
def process_view(self, request, view_func, view_args, view_kwargs):
# do not profile ajax requests for now
if not self.is_enabled(request) or request.is_ajax():
return None
trace_info = profiler_utils.signed_unpack(
request.META.get('X-Trace-Info'),
request.META.get('X-Trace-HMAC'),
self.hmac_keys)
if not self._trace_is_valid(trace_info):
return None
profiler.init(**trace_info)
info = {
'request': {
'path': request.path,
'query': request.GET.urlencode(),
'method': request.method,
'scheme': request.scheme
}
}
with api.traced(request, view_func.__name__, info) as trace_id:
response = view_func(request, *view_args, **view_kwargs)
url = reverse('horizon:developer:profiler:index')
message = safestring.mark_safe(
_('Traced with id %(id)s. Go to <a href="%(url)s">page</a>') %
{'id': trace_id, 'url': url})
messages.info(request, message)
return response
@staticmethod
def clear_profiling_cookies(request, response):
"""Expire any cookie that initiated profiling request."""
if 'profile_page' in request.COOKIES:
path = request.path[:-1]
response.set_cookie('profile_page', max_age=0, path=path)
def process_response(self, request, response):
self.clear_profiling_cookies(request, response)
# do not profile ajax requests for now
if not self.is_enabled(request) or request.is_ajax():
return response
return response
| 36.154472
| 78
| 0.660895
|
e9366c132f964757d915262d9290e35ad84cbb21
| 1,017
|
py
|
Python
|
examples/operation_layerSet.py
|
13751742405/photoshop-python-api
|
5fe9b46dd2b2b4e2e1e6ef99a68d68b4fc032a70
|
[
"MIT"
] | null | null | null |
examples/operation_layerSet.py
|
13751742405/photoshop-python-api
|
5fe9b46dd2b2b4e2e1e6ef99a68d68b4fc032a70
|
[
"MIT"
] | null | null | null |
examples/operation_layerSet.py
|
13751742405/photoshop-python-api
|
5fe9b46dd2b2b4e2e1e6ef99a68d68b4fc032a70
|
[
"MIT"
] | null | null | null |
"""A examples to show you how to operation layerSet."""
from photoshop import Session
with Session(action="new_document") as ps:
docRef = ps.active_document
# Add a new layerSet.
new_layer_set = docRef.layerSets.add()
# Print the layerSet count.
ps.echo(docRef.layerSets.length)
ps.echo(len(docRef.layerSets))
# Rename the layerSet.
docRef.layerSets[0].name = "New Name"
ps.echo(new_layer_set.name)
# Change the layerSet opacity
new_layer_set.opacity = 90
ps.echo(new_layer_set.opacity)
# Duplicate the layerSet.
duplicate_layer_set = new_layer_set.duplicate()
# Add a new artLayer in current active document.
layer = docRef.artLayers.add()
# Move the artLayer under the duplicate layerSet.
layer.move(duplicate_layer_set, ps.ElementPlacement.PlaceInside)
# Merge the layerSet.
merged_layer = duplicate_layer_set.merge()
ps.echo(merged_layer.name)
# Set visible.
new_layer_set.visible = False
merged_layer.remove()
| 29.911765
| 68
| 0.712881
|
a0e18eb16b60165b1082cba92b7b84da90f3d169
| 25,921
|
py
|
Python
|
python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py
|
xingjing1/Paddle
|
af886995ac38bd26588de33205a19eb1e72fecbf
|
[
"Apache-2.0"
] | 3
|
2017-05-11T11:10:13.000Z
|
2017-10-23T09:13:14.000Z
|
python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py
|
gongweibao/Paddle
|
c91b1e039f29a62fb3050f979afecd71eabd734f
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py
|
gongweibao/Paddle
|
c91b1e039f29a62fb3050f979afecd71eabd734f
|
[
"Apache-2.0"
] | 2
|
2021-02-19T06:42:29.000Z
|
2021-02-26T12:16:05.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid import core, unique_name
from functools import reduce
from paddle.distributed.fleet.meta_optimizers.common import is_loss_grad_op
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
import re
import os
def check_broadcast(block):
"""
    If a var is broadcasted, there should be a sync_comm before
    the var is used; if not, raise an error.
    If the broadcasted var has a fill_constant op, the fill_constant
    op should stay before the broadcast op and before a
    sync_calc op; otherwise, raise an error.
    Broadcast ops of inner parallelism (e.g. Megatron) should be ignored and skipped.
"""
broadcast_vars = {}
for idx, op in enumerate(block.ops):
if op.type == "c_broadcast":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
if "@BroadCast" in var_name:
if var_name in broadcast_vars:
raise ValueError("var_name areadly exist: {}"
"the old pos is {}, the new pos is {}".
format(var_name, broadcast_vars[
var_name]["broadcast_pos"], idx))
broadcast_vars[var_name] = {
"fill_constant_pos": -1,
"broadcast_pos": idx,
}
for idx, op in enumerate(block.ops):
if op.type == "fill_constant":
var_name = op.desc.output_arg_names()[0]
if var_name in broadcast_vars:
broadcast_vars[var_name]["fill_constant_pos"] = idx
continue
last_sync_comm_op_idx = -1
last_sync_calc_op_idx = -1
for idx, op in enumerate(block.ops):
if op.type == "c_sync_comm_stream":
last_sync_comm_op_idx = idx
continue
if op.type == "c_sync_calc_stream":
last_sync_calc_op_idx = idx
continue
if op.type == "c_broadcast":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
if "@BroadCast" in var_name:
if broadcast_vars[var_name]["fill_constant_pos"] != -1:
assert (last_sync_calc_op_idx != -1)
assert (broadcast_vars[var_name]["fill_constant_pos"] <
last_sync_calc_op_idx)
assert (last_sync_calc_op_idx < idx)
continue
for input_name in op.desc.input_arg_names():
if input_name in broadcast_vars:
assert (broadcast_vars[input_name]["broadcast_pos"] != -1)
assert (broadcast_vars[input_name]["broadcast_pos"] <
last_sync_comm_op_idx)
assert (last_sync_comm_op_idx < idx)
return
def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1):
"""
the op order should be:
grad:
        - 0: op that generates Var
        - 1: sync_calc
        - 2: reduce_sum_sharding (allreduce --> reduce)
        - 3: sync_comm
        - 4: allreduce_sum_dp (dp_grads)
        - 5: sync_comm (dp_grads)
        - 6: op that uses Var (dp_grads & sum)
should ignore and skip allreduce_op of inner_parallelism (e.g. Megatron)
"""
vars_status = {}
dp_grads_status = {}
idx_last_grad_allreduce = -1
idx_amp_allreduce = -1
idx_gradient_clip_allreduce = -1
for idx, op in enumerate(block.ops):
# sharding use both allreduce and reduce to sync grad
if op.type == "c_allreduce_sum" or op.type == "c_reduce_sum":
if op.all_attrs()["use_calc_stream"] == False:
ring_id = op.desc.attr("ring_id")
var_name = op.desc.input_arg_names()[0]
param = var_name.split("@")[0]
assert 'sum' in var_name or ("@GRAD" in var_name)
if 'sum' in var_name or (not shard.has_param(param)):
vars_status[var_name] = -1
else:
dp_grads_status[var_name] = -1
if ring_id != sharding_ring_id:
assert shard.has_param(param)
assert ring_id == dp_ring_id
if "sum" in var_name:
idx_amp_allreduce = idx
elif "@GRAD":
idx_last_grad_allreduce = idx
if op.type == "c_allreduce_max":
idx_gradient_clip_allreduce = idx
for op in block.ops:
if op.type == "c_sync_calc_stream":
for var_name in vars_status:
if var_name in vars_status and vars_status[var_name] == 0:
vars_status[var_name] = 1
for var_name in dp_grads_status:
if var_name in dp_grads_status and dp_grads_status[
var_name] == 0:
dp_grads_status[var_name] = 1
# check sharding allreduce and reduce but skip megatron allreduce
elif op.type == "c_allreduce_sum" or op.type == "c_reduce_sum":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
ring_id = op.desc.attr("ring_id")
if ring_id == sharding_ring_id:
assert op.type == "c_reduce_sum", "Grad in Sharding group should be reduce rather than allreduce"
if var_name in vars_status:
_status = vars_status[var_name]
else:
_status = dp_grads_status[var_name]
if _status == -1:
raise ValueError("{} is not generated, but you are"
"trying to all-reduce it".format(
var_name))
if _status == 0:
raise ValueError("There should be a sync_calc op "
"after generate Var: {} and before the"
"c_allreduce_sum op".format(var_name))
assert (_status == 1)
if var_name in vars_status:
vars_status[var_name] = 2
else:
dp_grads_status[var_name] = 2
else:
assert ring_id == dp_ring_id
param = var_name.split("@")[0]
assert shard.has_param(param)
assert dp_grads_status[var_name] == 3
dp_grads_status[var_name] = 4
elif op.type == "c_sync_comm_stream":
var_name = op.desc.input_arg_names()[0]
ring_id = op.desc.attr("ring_id")
if ring_id == sharding_ring_id:
for var_name in op.desc.input_arg_names():
if var_name in vars_status:
assert vars_status[var_name] == 2
vars_status[var_name] = 3
elif var_name in dp_grads_status:
assert dp_grads_status[var_name] == 2
dp_grads_status[var_name] = 3
else:
for var_name in op.desc.input_arg_names():
param = var_name.split("@")[0]
assert ring_id == dp_ring_id
assert shard.has_param(param)
assert dp_grads_status[var_name] == 4
dp_grads_status[var_name] = 5
else:
for input_name in op.desc.input_arg_names():
if input_name in vars_status:
if vars_status[input_name] != 3:
raise ValueError("There should be a sync_comm op "
"after allreduce the Var: {}".format(
input_name))
raise ValueError(
"The reduce output grad [{}] should NOT be be used in Non-root rank.".
format(input_name))
if input_name in dp_grads_status:
if dp_ring_id == -1:
if dp_grads_status[input_name] != 3:
raise ValueError("There should be a sync_comm op "
"after allreduce the Var: {}".
format(input_name))
else:
if dp_grads_status[input_name] != 5:
raise ValueError(
"The grad in shard should be allreduce and sync"
"twice before usage {}".format(input_name))
for output_name in op.desc.output_arg_names():
if output_name in vars_status and \
vars_status[output_name] == -1:
vars_status[output_name] = 0
if output_name in dp_grads_status and \
dp_grads_status[output_name] == -1:
dp_grads_status[output_name] = 0
# check sharding with amp
if idx_amp_allreduce != -1:
assert idx_amp_allreduce > idx_last_grad_allreduce
# check sharding with gradient_clip_by_global_norm
if idx_gradient_clip_allreduce != -1:
assert idx_gradient_clip_allreduce > idx_last_grad_allreduce
return
def get_valid_op_role(block, insert_idx):
"""
return OpRole.Forward or OpRole.Backward
"""
    # check the index bound before accessing block.ops to avoid an IndexError
    if insert_idx >= len(block.ops):
        return OpRole.Backward
    op_role = block.ops[insert_idx].attr('op_role')
    if op_role in [int(OpRole.Backward), int(OpRole.Optimize)]:
        return OpRole.Backward
if op_role in [int(OpRole.Forward), int(OpRole.Loss)]:
return OpRole.Forward
return get_valid_op_role(block, insert_idx + 1)
def insert_sync_calc_op(block, insert_idx, calc_dep_vars):
"""
_insert_sync_calc_op
"""
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_calc_stream',
inputs={'X': calc_dep_vars},
outputs={'Out': calc_dep_vars},
attrs={OP_ROLE_KEY: op_role})
return
def insert_sync_comm_op(block, insert_idx, ring_id, comm_dep_vars):
"""
insert sync_comm_op for single var
"""
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_comm_stream',
inputs={'X': comm_dep_vars},
outputs={'Out': comm_dep_vars},
attrs={'ring_id': ring_id,
OP_ROLE_KEY: op_role})
return 1
def insert_sync_comm_ops(block, insert_idx, ring_id, comm_dep_vars):
"""
insert sync_comm_op for vars
"""
    # NOTE (JZ-LIANG): to be checked, may result in an undefined case
if len(comm_dep_vars) == 0:
return 0
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_comm_stream',
inputs={'X': comm_dep_vars},
outputs={'Out': comm_dep_vars},
attrs={'ring_id': int(ring_id),
OP_ROLE_KEY: op_role})
return 1
def insert_fill_constant_ops(block, insert_idx, fill_constant_vars):
"""
_add_fill_constant_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for broadcast_name in fill_constant_vars:
broadcast_var = block.var(broadcast_name)
block._insert_op_without_sync(
insert_idx,
type="fill_constant",
outputs={"Out": broadcast_var.name},
attrs={
"shape": broadcast_var.shape,
"dtype": broadcast_var.dtype,
"value": 0.0,
OP_ROLE_KEY: op_role
})
return
def insert_cast_ops(block, insert_idx, cast_ops):
"""
_add_cast_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for fp16_name, fp32_name in cast_ops.items():
block._insert_op_without_sync(
insert_idx,
type="cast",
inputs={"X": fp32_name},
outputs={"Out": fp16_name},
attrs={
"in_dtype": core.VarDesc.VarType.FP32,
"out_dtype": core.VarDesc.VarType.FP16,
OP_ROLE_KEY: op_role
})
return
def insert_allreduce_ops(block,
insert_idx,
ring_id,
allreduce_vars,
op_role=OpRole.Backward,
use_calc_stream=False,
user_defined_strategy=None):
"""
_add_allreduce_ops
"""
if len(allreduce_vars) == 0:
return
if user_defined_strategy and user_defined_strategy.fuse_all_reduce_ops:
insert_fused_allreduce_ops(block, insert_idx, ring_id, allreduce_vars,
op_role, use_calc_stream,
user_defined_strategy.fuse_grad_size_in_MB)
else:
for var in allreduce_vars:
block._insert_op_without_sync(
insert_idx,
type='c_allreduce_sum',
inputs={'X': var},
outputs={'Out': var},
attrs={
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
return
def insert_fused_allreduce_ops(block,
insert_idx,
ring_id,
allreduce_vars,
op_role=OpRole.Backward,
use_calc_stream=False,
fuse_grad_size_in_MB=32):
segments = []
cur_size = 0.
last_dtype = None
for var in allreduce_vars:
real_var = block.var(var)
var_size = get_var_size(real_var)
if cur_size + var_size > fuse_grad_size_in_MB \
or len(segments) == 0 \
or real_var.dtype != last_dtype:
segments.append([real_var])
cur_size = var_size
last_dtype = real_var.dtype
else:
segments[-1].append(real_var)
cur_size += var_size
fused_vars = []
for segment in segments:
tmp_var = block.create_var(
name=unique_name.generate('FusedOutput_{}'.format(segment[0].name)),
dtype=segment[0].dtype,
persistable=False,
stop_gradient=True)
fused_vars.append(tmp_var)
block._insert_op_without_sync(
insert_idx,
type="coalesce_tensor",
inputs={"Input": segment},
outputs={"Output": segment,
"FusedOutput": tmp_var},
attrs={
"copy_data": True,
"use_align": True,
"dtype": segment[0].dtype,
OP_ROLE_KEY: op_role
})
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + len(fused_vars),
type='c_allreduce_sum',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
if not use_calc_stream:
block._insert_op_without_sync(
insert_idx + len(fused_vars),
type='c_sync_calc_stream',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={OP_ROLE_KEY: op_role})
def insert_reduce_ops(block,
insert_idx,
ring_id,
reduce_vars,
shard,
op_role=OpRole.Backward,
use_calc_stream=False):
"""
    _add_reduce_ops
"""
for var in reduce_vars:
root_id = get_grad_device(var, shard)
        assert root_id >= 0, "root id should be a non-negative int, but now root id is {}".format(
root_id)
block._insert_op_without_sync(
insert_idx,
type='c_reduce_sum',
inputs={'X': var},
outputs={'Out': var},
attrs={
'ring_id': ring_id,
'root_id': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
return
def get_grad_device(grad_name, shard):
assert "@GRAD" in grad_name, "[{}] should be a grad variable.".format(
grad_name)
base_name = None
# mind the traversal order
possible_suffixes = [
'.cast_fp16@GRAD@MERGED', '.cast_fp16@GRAD', '@GRAD@MERGED', '@GRAD'
]
for suffix in possible_suffixes:
if suffix in grad_name:
base_name = re.sub(suffix, '', grad_name)
break
assert base_name in shard.global_param2device, "[{}] should be a param variable.".format(
base_name)
return shard.global_param2device[base_name]
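# Example (hypothetical variable names, for illustration only):
#   get_grad_device("fc_0.w_0.cast_fp16@GRAD@MERGED", shard) strips the suffix
#   ".cast_fp16@GRAD@MERGED" and looks up "fc_0.w_0" in shard.global_param2device.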
def get_first_check_finite_and_unscale_op_idx(block, raise_error=True):
for idx, op in enumerate(block.ops):
if op.type == "check_finite_and_unscale":
return idx
if raise_error:
raise ValueError(
"amp is turned on but check_finite_and_unscale op does not exist in main block"
)
return -1
def insert_broadcast_ops(block, insert_idx, ring_id, broadcast2root):
"""
_add_broadcast_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for broadcast_name, root_device in broadcast2root:
block._insert_op_without_sync(
insert_idx,
type='c_broadcast',
inputs={'X': broadcast_name},
outputs={'Out': broadcast_name},
attrs={
'ring_id': ring_id,
'root': root_device,
OP_ROLE_KEY: op_role
})
return
DtypeToSize = {
core.VarDesc.VarType.FP16: 2,
core.VarDesc.VarType.FP32: 4,
core.VarDesc.VarType.FP64: 8,
core.VarDesc.VarType.INT16: 2,
core.VarDesc.VarType.INT32: 4,
core.VarDesc.VarType.INT64: 8,
core.VarDesc.VarType.BOOL: 1,
core.VarDesc.VarType.UINT8: 1,
}
def get_var_size(param):
"""
input:
- param: var
return:
var size in MB
"""
assert -1 not in param.shape
return reduce(lambda x, y: x * y,
param.shape) * DtypeToSize[param.dtype] / 1024.0 / 1024.0
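# Example (assumed shape, for illustration only): an FP32 var of shape [1024, 1024]
# occupies 1024 * 1024 * 4 bytes = 4194304 bytes, so get_var_size(...) returns 4.0 (MB).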
def insert_scale_loss_grad_ops(block, scale=1.0):
'''
In order to keep the learning rate consistent in different numbers of
training workers, we scale the loss grad by the number of workers
'''
for idx, op in reversed(list(enumerate(block.ops))):
if is_loss_grad_op(op):
loss_grad_var = block.vars[op.output_arg_names[0]]
block._insert_op_without_sync(
idx + 1,
type='scale',
inputs={'X': loss_grad_var},
outputs={'Out': loss_grad_var},
attrs={'scale': scale,
OP_ROLE_KEY: OpRole.Backward})
break
def comm_analyse(main_program):
"""
    Analyse the parameter sizes that need to be broadcast/allreduced during sharding training
"""
reduce_vars = {}
broadcast_vars = {}
block = main_program.global_block()
for op in block.ops:
if op.type == "c_broadcast":
var_name = op.desc.input_arg_names()[0]
# convert MB to KB
broadcast_vars[var_name] = get_var_size(block.var(
var_name)) * 1024.0
elif op.type == "c_allreduce_sum":
var_name = op.desc.input_arg_names()[0]
reduce_vars[var_name] = get_var_size(block.var(var_name)) * 1024.0
varsize_count = {}
gap = 1
for k, v in broadcast_vars.items():
print("broadcast: {}: {} KB".format(k, v))
if (int(v / gap) in varsize_count):
varsize_count[int(v / gap)] += 1
else:
varsize_count[int(v / gap)] = 1
for k, v in reduce_vars.items():
print("allreduce: {}: {} KB".format(k, v))
if (int(v / gap) in varsize_count):
varsize_count[int(v / gap)] += 1
else:
varsize_count[int(v / gap)] = 1
with open("nccl_size.txt", 'w') as f:
sorted_varsize = sorted(varsize_count.items(), key=lambda x: x[0])
for varsize, count in sorted_varsize:
print("NCCL size {}~{} KB: {}".format(varsize, varsize + 1, count))
f.write("NCCL size {}~{} KB: {}\n".format(varsize, varsize + 1,
count))
def add_sync_comm(program, sharding_ring_id):
"""
    When cloning a test program from the sharding main program,
    part of the sync_comm ops may be pruned by mistake; this function
    adds the sync_comm ops back for the test program.
"""
    # NOTE (liangjianzhong): only one comm stream is supported for now; using more
    # than one comm stream will cause an error. This should be revised in the future.
    assert sharding_ring_id >= 0, "sharding_ring_id should be no less than zero"
block = program.global_block()
not_sync_vars = set([])
for op in block.ops:
if op.type in ["c_broadcast", "c_allreduce"]:
for input_name in op.desc.input_arg_names():
not_sync_vars.add(input_name)
if op.type == "c_sync_comm_stream":
for input_name in op.desc.input_arg_names():
not_sync_vars.remove(input_name)
if not_sync_vars:
block.append_op(
type='c_sync_comm_stream',
inputs={'X': list(not_sync_vars)},
outputs={'Out': list(not_sync_vars)},
attrs={
'ring_id': sharding_ring_id,
'op_role': core.op_proto_and_checker_maker.OpRole.Forward
})
return
def save_persistables(exe, dirname, main_program, filename=None):
"""
    When using sharding, some persistable vars are unique and partitioned across different ranks,
    while others are duplicated and exist on all the ranks with different values.
    This function handles model saving for sharding training.
"""
# TODO (JZ-LIANG) revise this for uniform mixed parallelism
if main_program._pipeline_opt:
main_program = main_program._pipeline_opt['section_program']
def is_opt_vars(var):
        # NOTE(JZ-LIANG): The checks should be updated when adding a new compatible optimizer;
        # currently only Momentum and Adam are compatible with sharding.
checks = [
"_moment1_0", "_moment2_0", "_beta1_pow_acc_0", "_beta2_pow_acc_0",
"_velocity_0"
]
for check in checks:
if var.name.endswith(check):
return True
return False
def is_gradient_merge_vars(var):
        # NOTE(JZ-LIANG): revise the save/load logic in the framework instead of writing this naive rule
return var.name.endswith("@GradiantMerge")
def is_trainable(var):
return isinstance(var,
paddle.fluid.framework.Parameter) and var.trainable
def sharding_predicate(var):
return is_trainable(var) or is_opt_vars(var) or is_gradient_merge_vars(
var)
if int(os.environ.get('PADDLE_TRAINER_ID', 0)) == 0:
paddle.fluid.io.save_persistables(
exe, dirname, main_program=main_program, filename=None)
else:
paddle.fluid.io.save_vars(
exe,
dirname,
main_program=main_program,
predicate=sharding_predicate,
filename=None)
return
def get_grad_device(grad_name, shard):
assert "@GRAD" in grad_name, "[{}] should be a grad variable.".format(
grad_name)
base_name = None
# mind the traversal order
possible_suffixes = ['.cast_fp16@GRAD', '@GRAD']
for suffix in possible_suffixes:
if suffix in grad_name:
base_name = re.sub(suffix, '', grad_name)
break
assert base_name in shard.global_param2device, "[{}] should be a param variable.".format(
base_name)
return shard.global_param2device[base_name]
def append_naive_sync(block, sync_var, ring_id):
    # NOTE (JZ-LIANG): update this to use barrier sync for more elegant logic
# sync within global
block.append_op(
type="fill_constant",
outputs={"Out": sync_var},
attrs={
"shape": sync_var.shape,
"dtype": sync_var.dtype,
"value": int(1),
})
block.append_op(
type='c_allreduce_sum',
inputs={'X': sync_var},
outputs={'Out': sync_var},
attrs={
'ring_id': ring_id,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Forward
})
block.append_op(
type='c_sync_calc_stream',
inputs={'X': [sync_var]},
outputs={'Out': [sync_var]},
attrs={OP_ROLE_KEY: OpRole.Forward})
| 36.152022
| 117
| 0.560164
|
4582afb04af443eb0c88aaee9ab2f20917b9d340
| 10,418
|
py
|
Python
|
fabfile.py
|
yashpatel12/CPIMS-api-newtest
|
d5129eb3aa034f70414a2471a72c0a74ad95f6ca
|
[
"Apache-2.0"
] | null | null | null |
fabfile.py
|
yashpatel12/CPIMS-api-newtest
|
d5129eb3aa034f70414a2471a72c0a74ad95f6ca
|
[
"Apache-2.0"
] | null | null | null |
fabfile.py
|
yashpatel12/CPIMS-api-newtest
|
d5129eb3aa034f70414a2471a72c0a74ad95f6ca
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import with_statement
from fabric.api import settings, env, prefix
from fabric.contrib.console import confirm
from fabric.operations import sudo, run, local, put
from fabric.context_managers import cd
import os
env.hosts = [os.environ.get('CPIMS_APP_HOST')]
env.user = os.environ.get('CPIMS_APP_USER')
env.key_filename = os.environ.get('CPIMS_KEY_FILENAME')
src_dir = os.environ.get('CPIMS_SRC_DIR')
deploy_dir = os.environ.get('CPIMS_DEPLOY_DIR')
target_dir = os.environ.get('CPIMS_TARGET_DIR')
cpims_venv = os.environ.get('CPIMS_VENV')
cpims_host = os.environ.get('CPIMS_DB_HOST')
cpims_password = os.environ.get('CPIMS_DB_PASSWORD')
cpims_db = os.environ.get('CPIMS_DB')
cpims_port = os.environ.get('CPIMS_DB_PORT')
cpims_dbuser = os.environ.get('CPIMS_DB_USER')
cpims_debug = os.environ.get('CPIMS_DEBUG')
def install_pg_bdr():
"setting up postgres-bdr as the default postgres db"
run("sudo yum install -y epel-release")
run("sudo yum install -y https://download.postgresql.org/pub/repos/yum/9.4/redhat/rhel-7-x86_64/pgdg-centos94-9.4-3.noarch.rpm")
run("sudo yum install -y --nogpgcheck http://packages.2ndquadrant.com/postgresql-bdr94-2ndquadrant/yum-repo-rpms/postgresql-bdr94-2ndquadrant-redhat-latest.noarch.rpm")
with settings(warn_only=True):
result = run("sudo yum -t check-update")
if result.return_code == 100:
run('sudo yum update -y')
run("sudo yum install -y --nogpgcheck postgresql-bdr94-bdr postgresql-bdr94-devel")
run("sudo /usr/pgsql-9.4/bin/postgresql94-setup initdb")
run("sudo systemctl start postgresql-9.4.service")
run("sudo systemctl enable postgresql-9.4.service")
source = "%s/configs/postgresql/*" %(os.environ.get('PWD'),)
put(local_path = source, remote_path ='/tmp/')
run("sudo -u postgres cp /tmp/pg_hba.conf /var/lib/pgsql/9.4-bdr/data/")
run("rm /tmp/pg_hba.conf")
run("sudo -u postgres cp /tmp/postgresql.conf /var/lib/pgsql/9.4-bdr/data/")
run("rm /tmp/postgresql.conf")
run("sudo systemctl restart postgresql-9.4.service")
def install_virtualenv():
run("sudo yum install -y epel-release")
run('sudo yum update -y')
run("sudo yum install -y gcc python2-pip python-devel python-setuptools memcached")
run("sudo pip install --upgrade pip")
run("sudo pip install virtualenv virtualenvwrapper uwsgi")
run("/usr/bin/echo 'export WORKON_HOME=~/.envs' >> /home/vagrant/.bash_profile")
#run("source /home/vagrant/.bash_profile")
run("/usr/bin/echo 'source /usr/bin/virtualenvwrapper.sh' >> /home/vagrant/.bash_profile")
run("mkvirtualenv %s" %(cpims_venv))
def install_cpims():
print "creating archive ..."
if os.path.isdir(deploy_dir):
local('rm -rf %s' %deploy_dir)
local('mkdir %s' %deploy_dir)
local('tar --exclude=cpims/configs --exclude=cpims/.git --exclude=.gitignore --exclude=*pyc --exclude=fabfile.py* -C %s -czvf %s/cpims.tar.gz cpims' %(src_dir,deploy_dir,))
put('%s/cpims.tar.gz' %(deploy_dir,), target_dir)
run('rm -rf %s/cpims' %(target_dir,))
run('tar -xzvf cpims.tar.gz')
run('rm cpims.tar.gz')
local('rm -rf %s' %deploy_dir)
with cd('/home/vagrant/cpims'), prefix('workon %s' %(cpims_venv)):
run('pip install -r requirements.txt')
def install_pg_configuration():
"install the pg connection details"
with cd("/home/vagrant"):
run("/usr/bin/echo 'CPIMS_HOST=%s' >> .bash_profile" %(cpims_host))
run("/usr/bin/echo 'CPIMS_PASSWORD=%s' >> .bash_profile" %(cpims_password))
run("/usr/bin/echo 'CPIMS_DB=%s' >> .bash_profile" %(cpims_db))
run("/usr/bin/echo 'CPIMS_PORT=%s' >> .bash_profile" %(cpims_port))
run("/usr/bin/echo 'CPIMS_DBUSER=%s' >> .bash_profile" %(cpims_dbuser))
run("/usr/bin/echo 'CPIMS_DEBUG=%s' >> .bash_profile" %(cpims_debug))
run("/usr/bin/echo 'export CPIMS_HOST' >> .bash_profile")
run("/usr/bin/echo 'export CPIMS_PASSWORD' >> .bash_profile")
run("/usr/bin/echo 'export CPIMS_DB' >> .bash_profile")
run("/usr/bin/echo 'export CPIMS_PORT' >> .bash_profile")
run("/usr/bin/echo 'export CPIMS_DBUSER' >> .bash_profile")
run("/usr/bin/echo 'export CPIMS_DEBUG' >> .bash_profile")
def setup_pg():
"install the pg users"
with settings(sudo_user='postgres') and cd('/var/lib/pgsql'):
sudo("psql -c \"create user %s with encrypted password '%s'\"" %(cpims_dbuser, cpims_password), user='postgres')
sudo("psql -c \"create database %s owner %s\"" %(cpims_db, cpims_dbuser), user='postgres')
def install_fixtures():
"installing basic fixtures for cpims"
with cd('%s/cpims' %(target_dir)), prefix('workon %s' %(cpims_venv)):
run("python manage.py makemigrations")
run("python manage.py migrate cpovc_auth")
run("python manage.py migrate")
run("python manage.py loaddata cpovc_auth/fixtures/initial_data.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_user.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_geo.json")
run("python manage.py loaddata cpovc_main/fixtures/list_general.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_facility1.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_facility2.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/olmis_forms.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/olmis_assessment.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/olmis_household_assessment_3.json")
run("python manage.py loaddata cpovc_main/fixtures/olmis_registry.json")
run("python manage.py loaddata cpovc_main/fixtures/eligibility.json")
run("python manage.py loaddata cpovc_main/fixtures/olmis_services.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/ovc_form_type_id.json")
run("python manage.py loaddata cpovc_main/fixtures/olmis_services.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/ovc_form_type_id.json")
run("python manage.py createsuperuser")
run("python manage.py loaddata cpovc_main/fixtures/initial_org_unit.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_org_unit_contact.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_org_unit_geo.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_persons.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_person_type.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_persons_externalids.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_persons_geo.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_persons_org_units.csv.json")
def create_super_user():
with cd('%s/cpims' %(target_dir)), prefix('workon %s' %(cpims_venv)):
#run("python manage.py createsuperuser")
run("python manage.py loaddata cpovc_main/fixtures/initial_org_unit.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_org_unit_contact.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_org_unit_geo.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_persons.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_person_type.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_persons_externalids.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_persons_geo.csv.json")
run("python manage.py loaddata cpovc_main/fixtures/initial_persons_org_units.csv.json")
def configure_uwsgi():
handle = open('%s/configs/uwsgi/cpims.ini' %(os.environ.get('PWD')), 'w')
handle.write('[uwsgi]\n')
handle.write('project = cpims\n')
handle.write('username = %s\n' %(env.user,))
handle.write('base = /home/%(username)\n')
handle.write('chdir = %(base)/%(project)\n')
handle.write('home = %%(base)/.envs/%s\n' %(cpims_venv,))
handle.write('module = %(project).wsgi:application\n')
handle.write('master = true\n')
handle.write('processes = 5\n')
handle.write('uid = %(username)\n')
handle.write('socket = /run/uwsgi/%(project).sock\n')
handle.write('chown-socket = %(username):nginx\n')
handle.write('chmod-socket = 660\n')
handle.write('vacuum = true\n')
handle.write('env = CPIMS_HOST=%s\n' %(cpims_host,))
handle.write('env = CPIMS_DB=%s\n' %(cpims_db,))
handle.write('env = CPIMS_DEBUG=%s\n' %(cpims_debug,))
handle.write('env = CPIMS_PORT=%s\n' %(cpims_port,))
handle.write('env = CPIMS_DBUSER=%s\n' %(cpims_dbuser,))
handle.write('env = CPIMS_PASSWORD=%s\n' %(cpims_password,))
handle.close()
def install_uwsgi():
run("sudo mkdir /etc/uwsgi")
run("sudo mkdir /etc/uwsgi/sites")
source = "%s/configs/uwsgi/cpims.ini" %(os.environ.get('PWD'),)
put(local_path = source, remote_path ='/tmp/')
run("sudo cp /tmp/cpims.ini /etc/uwsgi/sites/")
run("rm /tmp/cpims.ini")
source = "%s/scripts/uwsgi/uwsgi.service" %(os.environ.get('PWD'),)
put(local_path = source, remote_path ='/tmp/')
run("sudo cp /tmp/uwsgi.service /etc/systemd/system/")
run("rm /tmp/uwsgi.service")
run("sudo systemctl restart uwsgi")
run("sudo systemctl enable uwsgi")
def install_nginx():
run("sudo yum install -y nginx")
source = "%s/configs/nginx/nginx.conf" %(os.environ.get('PWD'),)
put(local_path = source, remote_path ='/tmp/')
run("sudo cp /tmp/nginx.conf /etc/nginx/")
run("rm /tmp/nginx.conf")
run("sudo chmod 750 /home/%s" %(env.user,))
run("sudo groupmems -a nginx -g %s" %(env.user,))
run("sudo systemctl restart nginx")
run("sudo systemctl enable nginx")
def configure_se_linux():
run("sudo setenforce 0")
source = "%s/configs/selinux/config" %(os.environ.get('PWD'),)
put(local_path = source, remote_path ='/tmp/')
run("sudo cp /tmp/config /etc/selinux/config")
run("rm /tmp/config")
def deploy():
install_pg_bdr()
setup_pg()
install_pg_configuration()
install_virtualenv()
install_cpims()
install_fixtures()
configure_uwsgi()
configure_se_linux()
install_nginx()
install_uwsgi()
| 50.328502
| 176
| 0.691304
|
1e2d22d60c96fc1c321ea2bb429dc8ad347f470b
| 3,721
|
py
|
Python
|
vendor/jx_python/cubes/aggs.py
|
klahnakoski/auth0-api
|
eda9c2554c641da76687f64445b8d35543d012d9
|
[
"MIT"
] | null | null | null |
vendor/jx_python/cubes/aggs.py
|
klahnakoski/auth0-api
|
eda9c2554c641da76687f64445b8d35543d012d9
|
[
"MIT"
] | null | null | null |
vendor/jx_python/cubes/aggs.py
|
klahnakoski/auth0-api
|
eda9c2554c641da76687f64445b8d35543d012d9
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
import itertools
from jx_base.domains import DefaultDomain, SimpleSetDomain
from jx_python import windows
from jx_python.expressions import jx_expression_to_function
from mo_collections.matrix import Matrix
from mo_dots import listwrap
from mo_logs import Log
def cube_aggs(frum, query):
select = listwrap(query.select)
#MATCH EDGES IN QUERY TO ONES IN frum
for e in query.edges:
for fs in frum.select:
if fs.name == e.value:
Log.error("Not implemented yet")
if isinstance(e.domain, DefaultDomain):
# DEFAULT DOMAINS CAN EASILY BE LOOKED UP FROM frum
for fe in frum.edges:
if fe.name == e.value:
e.domain = SimpleSetDomain(**fe.domain.__data__())
e.value = e.value + "." + fe.domain.key
break
else:
for fe in frum.edges:
if fe.name == e.value:
e.value = e.value + "." + fe.domain.key
break
result = {
s.name: Matrix(
dims=[len(e.domain.partitions) + (1 if e.allowNulls else 0) for e in query.edges],
zeros=s.default
)
for s in select
}
where = jx_expression_to_function(query.where)
for d in filter(where, frum.values()):
coord = [] # LIST OF MATCHING COORDINATE FAMILIES, USUALLY ONLY ONE PER FAMILY BUT JOINS WITH EDGES CAN CAUSE MORE
for e in query.edges:
matches = get_matches(e, d)
coord.append(matches)
if len(matches) == 1 and d[e.name] == None:
d[e.name] = e.domain.partitions[matches[0]]
for s in select:
mat = result[s.name]
agg = s.aggregate
var = s.value
expr = jx_expression_to_function(var)
val = expr(d)
if agg == "count":
if var == "." or var == None:
for c in itertools.product(*coord):
mat[c] += 1
continue
if val != None:
for c in itertools.product(*coord):
mat[c] += 1
else:
for c in itertools.product(*coord):
acc = mat[c]
if acc == None:
acc = windows.name2accumulator.get(agg)
if acc == None:
Log.error("select aggregate {{agg}} is not recognized", agg= agg)
acc = acc(**s)
mat[c] = acc
acc.add(val)
for s in select:
if s.aggregate == "count":
continue
m = result[s.name]
for c, var in m.items():
if var != None:
m[c] = var.end()
from jx_python.containers.cube import Cube
return Cube(select, query.edges, result)
def get_matches(e, d):
if e.value:
return [e.domain.getIndexByKey(d[e.value])]
elif e.range:
output = []
mi, ma = d[e.range.min], d[e.range.max]
var = e.domain.key
for p in e.domain.partitions:
if mi <= p[var] < ma:
output.append(p.dataIndex)
if e.allowNulls and not output:
output.append(len(e.domain.partitions)) # ENSURE THIS IS NULL
return output
| 33.223214
| 123
| 0.531846
|
3806b1539bef36ecbc1a381903bdfcb38a7c712d
| 287
|
py
|
Python
|
Training/Gradient Clipping/tf.clip_by_value.py
|
Asurada2015/TFAPI_translation
|
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
|
[
"Apache-2.0"
] | 7
|
2017-10-19T13:59:24.000Z
|
2019-11-26T03:40:08.000Z
|
Training/Gradient Clipping/tf.clip_by_value.py
|
Asurada2015/TFAPI_translation
|
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
|
[
"Apache-2.0"
] | null | null | null |
Training/Gradient Clipping/tf.clip_by_value.py
|
Asurada2015/TFAPI_translation
|
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
|
[
"Apache-2.0"
] | 5
|
2018-08-22T02:57:03.000Z
|
2020-03-05T07:14:21.000Z
|
import tensorflow as tf
import numpy as np
# tf.clip_by_value(A, min, max): given a tensor A, clamp every element of A into the range [min, max].
# Elements smaller than min are set to min; elements larger than max are set to max.
A = np.array([[1, 1, 2, 4], [3, 4, 8, 5]])
with tf.Session()as sess:
print(sess.run(tf.clip_by_value(A, 2, 5)))
#
# [[2 2 2 4]
# [3 4 5 5]]
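# A minimal follow-up sketch (assumption, not part of the original snippet): the same
# op is what gradient clipping by value builds on, e.g. in TF1-style training code:
#   grads_and_vars = optimizer.compute_gradients(loss)
#   clipped = [(tf.clip_by_value(g, -1.0, 1.0), v) for g, v in grads_and_vars if g is not None]
#   train_op = optimizer.apply_gradients(clipped)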
| 26.090909
| 65
| 0.658537
|
ce3931601ca45ae4012c5071986d3a1fa9a1e28c
| 1,535
|
py
|
Python
|
OpenRobertaServer/src/test/resources/crossCompilerTests/_expected/common/targetLanguage/ev3dev/functionsBasic.py
|
RaghuvirShirodkar/openroberta-lab
|
ab73c72a593cdeb42925c9b279530110b17db136
|
[
"Apache-2.0"
] | null | null | null |
OpenRobertaServer/src/test/resources/crossCompilerTests/_expected/common/targetLanguage/ev3dev/functionsBasic.py
|
RaghuvirShirodkar/openroberta-lab
|
ab73c72a593cdeb42925c9b279530110b17db136
|
[
"Apache-2.0"
] | null | null | null |
OpenRobertaServer/src/test/resources/crossCompilerTests/_expected/common/targetLanguage/ev3dev/functionsBasic.py
|
RaghuvirShirodkar/openroberta-lab
|
ab73c72a593cdeb42925c9b279530110b17db136
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import
from roberta.ev3 import Hal
from ev3dev import ev3 as ev3dev
import math
import os
import time
class BreakOutOfALoop(Exception): pass
class ContinueLoop(Exception): pass
_brickConfiguration = {
'wheel-diameter': 5.6,
'track-width': 18.0,
'actors': {
},
'sensors': {
},
}
hal = Hal(_brickConfiguration)
___n1 = 0
___b = False
___n2 = 1
___n3 = 4
def number():
global ___n1, ___b, ___n2, ___n3
___n1 = ___n2 + ___n3
def breakFunct():
global ___n1, ___b, ___n2, ___n3
if 5 == ___n1: return None
___n1 = ___n1 + 1000
def retBool():
global ___n1, ___b, ___n2, ___n3
___n1 = ___n1
return ___b
def retNumber():
global ___n1, ___b, ___n2, ___n3
___n1 = ___n1
return ___n1
def retNumber2(___x):
global ___n1, ___b, ___n2, ___n3
___x = ___x / float(2)
return ___x
def run():
global ___n1, ___b, ___n2, ___n3
# Basic Functions START
number()
breakFunct()
if not 5 == ___n1:
print("Assertion failed: ", "pos-1", 5, "EQ", ___n1)
___n1 = retNumber()
___b = retBool()
___n1 = retNumber2(10)
# Basic Functions END
def main():
try:
run()
except Exception as e:
hal.drawText('Fehler im EV3', 0, 0)
hal.drawText(e.__class__.__name__, 0, 1)
hal.drawText(str(e), 0, 2)
hal.drawText('Press any key', 0, 4)
while not hal.isKeyPressed('any'): hal.waitFor(500)
raise
if __name__ == "__main__":
main()
| 20.466667
| 60
| 0.624756
|
a55846ff3881906b2c805d9db3d1ad3f381b1bf0
| 1,905
|
py
|
Python
|
anime_downloader/extractors/mp4upload.py
|
itachi1706/anime-downloader
|
98a847b6af18c52ebf11c883965b562627057521
|
[
"Unlicense"
] | 1
|
2019-09-26T02:38:31.000Z
|
2019-09-26T02:38:31.000Z
|
anime_downloader/extractors/mp4upload.py
|
itachi1706/anime-downloader
|
98a847b6af18c52ebf11c883965b562627057521
|
[
"Unlicense"
] | null | null | null |
anime_downloader/extractors/mp4upload.py
|
itachi1706/anime-downloader
|
98a847b6af18c52ebf11c883965b562627057521
|
[
"Unlicense"
] | null | null | null |
import logging
import re
from bs4 import BeautifulSoup
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader import session
session = session.get_session()
class MP4Upload(BaseExtractor):
    '''Extracts the video url from mp4upload embed pages. It performs a request
    back to the non-embed mp4upload page to extract the title of the video,
    albeit imperfectly, as mp4upload doesn't place the full title on the main
    page of whichever video you are dealing with.
'''
def _get_data(self):
# Extract the important bits from the embed page, with thanks to the
# code I saw from github user py7hon in his/her mp4upload-direct
# program as inspiration for this. Only with regex.
source_parts_re = re.compile(
r'.*?100\|(.*?)\|.*?\|video\|(.*?)\|(\d+)\|.*?',
re.DOTALL)
mp4u_embed = session.get(self.url).text
domain, video_id, protocol = source_parts_re.match(mp4u_embed).groups()
logging.debug('Domain: %s, Video ID: %s, Protocol: %s' %
(domain, video_id, protocol))
url = self.url.replace('embed-', '')
# Return to non-embed page to collect title
mp4u_page = BeautifulSoup(session.get(url).text, 'html.parser')
title = mp4u_page.find('span', {'class': 'dfilename'}).text
title = title[:title.rfind('_')][:title.rfind('.')].replace(' ', '_')
logging.debug('Title is %s' % title)
# Create the stream url
stream_url = 'https://{}.mp4upload.com:{}/d/{}/{}.mp4'
stream_url = stream_url.format(domain, protocol, video_id, title)
logging.debug('Stream URL: %s' % stream_url)
return {
'stream_url': stream_url,
'meta': {
'title': title,
'thumbnail': ''
}
}
| 35.943396
| 80
| 0.6
|
2cfd3e3b8c13404d89233f2ba95ede79e03966e1
| 2,151
|
py
|
Python
|
v1_backend/src/v1_awattprice/fastapi_conf/api.py
|
sp4c38/AwattarApp
|
b914e8042e5cdcb84485d6d45133a00244662bda
|
[
"BSD-3-Clause"
] | 2
|
2020-09-06T18:17:20.000Z
|
2020-09-06T19:06:19.000Z
|
v1_backend/src/v1_awattprice/fastapi_conf/api.py
|
sp4c38/AwattarApp
|
b914e8042e5cdcb84485d6d45133a00244662bda
|
[
"BSD-3-Clause"
] | null | null | null |
v1_backend/src/v1_awattprice/fastapi_conf/api.py
|
sp4c38/AwattarApp
|
b914e8042e5cdcb84485d6d45133a00244662bda
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
AWattPrice API module
Poll the Awattar API
"""
__author__ = "Frank Becker <fb@alien8.de>"
__copyright__ = "Frank Becker"
__license__ = "mit"
from fastapi import BackgroundTasks, FastAPI, Request, status
from fastapi.responses import JSONResponse
from v1_awattprice import apns
from v1_awattprice import poll
from v1_awattprice.config import read_config
from v1_awattprice.defaults import Region
from v1_awattprice.types import APNSToken
from v1_awattprice.utils import start_logging
from v1_awattprice.v2_backend_handler import handle_new_apns_data
api = FastAPI()
@api.get("/")
async def root():
return {"message": "Nothing here. Please, move on."}
@api.get("/data/")
async def no_region(background_tasks: BackgroundTasks):
"""Return data if no region is given for Germany."""
region = Region.DE
data, _ = await poll.get_data(config=config, region=region)
headers = await poll.get_headers(config=config, data=data)
return JSONResponse(content=data, headers=headers)
@api.get("/data/{region_id}")
async def with_region(region_id, background_tasks: BackgroundTasks):
"""Return data for the given region."""
region = getattr(Region, region_id.upper(), None)
if not region:
return {"prices": []}
data, _ = await poll.get_data(config=config, region=region)
headers = await poll.get_headers(config=config, data=data)
return JSONResponse(content=data, headers=headers)
@api.post("/data/apns/send_token")
async def send_token(request: Request, background_tasks: BackgroundTasks):
request_body = await request.body()
request_data: APNSToken = apns.validate_token(request_body)
if request_data is not None:
background_tasks.add_task(handle_new_apns_data, request_data)
return JSONResponse({"tokenWasPassedSuccessfully": True}, status_code=status.HTTP_200_OK)
else:
return JSONResponse(
{"tokenWasPassedSuccessfully": False},
status_code=status.HTTP_400_BAD_REQUEST,
)
@api.on_event("startup")
def startup_event():
global config
config = read_config()
start_logging(config)
| 29.067568
| 97
| 0.734077
|
3b3f9181722b2e0892c9f98efeb764852f50dd26
| 3,571
|
py
|
Python
|
0_hcphotonics/usbcounter/arthurparse.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | null | null | null |
0_hcphotonics/usbcounter/arthurparse.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | null | null | null |
0_hcphotonics/usbcounter/arthurparse.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | null | null | null |
import Gnuplot
import time
import argparse
import os, sys
import Gnuplot, Gnuplot.PlotItems, Gnuplot.funcutils
import json
import random
import numpy as np
def main():
parser = argparse.ArgumentParser(description = "Plots parsed json objects from usbcounter, arthur.py script. Plots a histogram of the counts by default. ")
parser.add_argument('dir', metavar = 'timestamp', nargs = '+', help = "Timestamp of json file to be plotted")
parser.add_argument('title', metavar = 'title', nargs = '+', help = "Title to be included in plot.")
parser.add_argument('--verbose', dest='verbose', action='store_true', help = "Print error messages. Does not do so by default.")
parser.set_defaults(verbose=False)
args = parser.parse_args()
a = arthurParse()
a.load(args.dir[0], args.title[0], args.verbose)
a.plot()
def iqr(x):
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2 * iqr * len(x) ** (float(-1)/float(3))
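# Note (editorial): this is the Freedman-Diaconis bin-width rule, 2 * IQR * n ** (-1/3).
# Example: for x = [1, 2, ..., 8] the IQR is 3.5 and n = 8, so the bin width is
# 2 * 3.5 * 8 ** (-1.0 / 3) = 3.5.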
class arthurParse():
def __init__(self):
self.cfg = {}
with open('cfg/.arthurparse') as f:
x = f.read()
x = x.split('\n')
for i in x:
if len(i) > 0:
i = i.rstrip()
i = i.split('=')
self.cfg[i[0]] = i[1]
print "\n warning: many components are hardcoded. may break easily. \n"
def load(self, path, title, verbose):
self.d1 = []
self.d2 = []
self.path = path
self.title = title
self.titlepath = ' '.join(path.split('_'))
self.fpath = 'jsondata/' + path
with open(self.fpath + '.json', 'rb+') as datafile:
self.data = json.load(datafile)
with open(self.fpath, 'wb+') as rawfile:
for i in xrange(len(self.data['counts'])):
try:
rawfile.write('{}\t{}\n'.format(self.data['counts'][i][1][0], self.data['counts'][i][1][1]))
self.d1.append(self.data['counts'][i][1][0])
self.d2.append(self.data['counts'][i][1][1])
except IndexError:
if verbose:
print ('IndexError. Are you using the wrong datafile without timedata? If not, the usbcounter did not respond in time. Ignoring ')
else:
pass
def plot(self):
self.initPlot()
self.plotDet0()
self.plotDet1()
def plotDet0(self):
self.g('set title "{} {}, detector 0, duration {} intervals"'.format(self.titlepath, self.title, self.data['duration']))
self.g('set output "{}_0.eps"'.format(self.fpath, self.cfg['format']))
self.g('binwidth = {}'.format(iqr(self.d1)))
self.g('plot "{}" using (bin($1,binwidth)):(1.0) smooth freq with boxes'.format(self.fpath))
def plotDet1(self):
self.g('set title "{} {}, detector 1, duration {} intervals"'.format(self.titlepath, self.title, self.data['duration']))
self.g('set output "{}_1.eps"'.format(self.fpath, self.cfg['format']))
self.g('binwidth = {}'.format(iqr(self.d2)))
self.g('plot "{}" using (bin($2,binwidth)):(1.0) smooth freq with boxes'.format(self.fpath))
def initPlot(self):
self.g = Gnuplot.Gnuplot()
self.g('set term {}'.format(self.cfg['format']))
self.g('set xlabel "{}"'.format(self.cfg['xlabel']))
self.g('set ylabel "{}"'.format(self.cfg['ylabel']))
#self.g('set yrange [0:100]')
self.g('bin(x,width)=width*floor(x/width)')
    def fit(self):
        pass  # not implemented in this script
main()
| 41.523256
| 159
| 0.565668
|
f940574abd08b0846e061d128115e9e9b4edfbc0
| 13,151
|
py
|
Python
|
coresched_vs.py
|
dreibh/planetlab-lxc-nodemanager
|
e3b9608c2e4184851f1fd2be7e449e62153789cf
|
[
"BSD-3-Clause"
] | null | null | null |
coresched_vs.py
|
dreibh/planetlab-lxc-nodemanager
|
e3b9608c2e4184851f1fd2be7e449e62153789cf
|
[
"BSD-3-Clause"
] | null | null | null |
coresched_vs.py
|
dreibh/planetlab-lxc-nodemanager
|
e3b9608c2e4184851f1fd2be7e449e62153789cf
|
[
"BSD-3-Clause"
] | null | null | null |
"""Whole core scheduling
"""
import logger
import os
glo_coresched_simulate = False
class CoreSched:
""" Whole-core scheduler
The main entrypoint is adjustCores(self, slivers) which takes a
dictionary of sliver records. The cpu_cores field is pulled from the
effective rspec (rec["_rspec"]) for each sliver.
If cpu_cores > 0 for a sliver, then that sliver will reserve one or
more of the cpu_cores on the machine.
One core is always left unreserved for system slices.
"""
def __init__(self, cgroup_var_name="cpuset.cpus", slice_attr_name="cpu_cores"):
self.cpus = []
self.cgroup_var_name = cgroup_var_name
self.slice_attr_name = slice_attr_name
self.cgroup_mem_name = "cpuset.mems"
self.mems=[]
self.mems_map={}
self.cpu_siblings={}
def get_cgroup_var(self, name=None, filename=None):
""" decode cpuset.cpus or cpuset.mems into a list of units that can
be reserved.
"""
assert(filename!=None or name!=None)
if filename==None:
filename="/dev/cgroup/" + name
data = open(filename).readline().strip()
if not data:
return []
units = []
# cpuset.cpus could be something as arbitrary as:
# 0,1,2-3,4,5-6
# deal with commas and ranges
for part in data.split(","):
unitRange = part.split("-")
if len(unitRange) == 1:
unitRange = (unitRange[0], unitRange[0])
for i in range(int(unitRange[0]), int(unitRange[1])+1):
if not i in units:
units.append(i)
return units
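    # Illustrative example (assumed cpuset contents, not from a real node): a
    # cpuset.cpus file containing "0,2-4" decodes to the unit list [0, 2, 3, 4].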
def get_cpus(self):
""" return a list of available cpu identifiers: [0,1,2,3...]
"""
# the cpus never change, so if it's already been computed then don't
# worry about it.
if self.cpus!=[]:
return self.cpus
self.cpus = self.get_cgroup_var(self.cgroup_var_name)
self.cpu_siblings = {}
for item in self.cpus:
self.cpu_siblings[item] = self.get_core_siblings(item)
return self.cpus
def find_cpu_mostsiblings(self, cpus):
bestCount = -1
bestCpu = -1
for cpu in cpus:
count = 0
for candidate in self.cpu_siblings[cpu]:
if candidate in cpus:
count = count + 1
if (count > bestCount):
bestCount = count
bestCpu = cpu
assert(bestCpu >= 0)
return bestCpu
def find_compatible_cpu(self, cpus, compatCpu):
if compatCpu==None:
return self.find_cpu_mostsiblings(cpus)
# find a sibling if we can
bestDelta = None
bestCpu = None
for cpu in cpus:
if compatCpu in self.cpu_siblings[cpu]:
return cpu
return self.find_cpu_mostsiblings(cpus)
def get_cgroups (self):
""" return a list of cgroups
this might change as vservers are instantiated, so always compute
it dynamically.
"""
cgroups = []
filenames = os.listdir("/dev/cgroup")
for filename in filenames:
if os.path.isdir(os.path.join("/dev/cgroup", filename)):
cgroups.append(filename)
return cgroups
def decodeCoreSpec (self, cores):
""" Decode the value of the core attribute. It's a number, followed by
an optional letter "b" to indicate besteffort cores should also
be supplied.
"""
bestEffort = False
if cores.endswith("b"):
cores = cores[:-1]
bestEffort = True
try:
cores = int(cores)
except ValueError:
cores = 0
return (cores, bestEffort)
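    # Illustrative example (assumed attribute values): decodeCoreSpec("2") returns
    # (2, False), while decodeCoreSpec("3b") returns (3, True), i.e. three reserved
    # cores plus access to the besteffort/default cores.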
def adjustCores (self, slivers):
""" slivers is a dict of {sliver_name: rec}
rec is a dict of attributes
rec['_rspec'] is the effective rspec
"""
cpus = self.get_cpus()[:]
mems = self.get_mems()[:]
memSchedule=True
if (len(mems) != len(cpus)):
logger.log("CoreSched fewer mems than " + self.cgroup_var_name + "; mem scheduling disabled")
memSchedule=False
logger.log("CoreSched (" + self.cgroup_var_name + "): available units: " + str(cpus))
reservations = {}
mem_reservations = {}
# allocate the cores to the slivers that have them reserved
# TODO: Need to sort this from biggest cpu_cores to smallest
for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
lastCpu = None
while (cores>0):
# one cpu core reserved for best effort and system slices
if len(cpus)<=1:
logger.log("CoreSched: ran out of units while scheduling sliver " + name)
else:
cpu = self.find_compatible_cpu(cpus, lastCpu)
cpus.remove(cpu)
lastCpu = cpu
logger.log("CoreSched: allocating unit " + str(cpu) + " to slice " + name)
reservations[name] = reservations.get(name, []) + [cpu]
# now find a memory node to go with the cpu
if memSchedule:
mem = self.find_associated_memnode(mems, cpu)
if mem != None:
mems.remove(mem)
logger.log("CoreSched: allocating memory node " + str(mem) + " to slice " + name)
mem_reservations[name] = mem_reservations.get(name, []) + [mem]
else:
logger.log("CoreSched: failed to find memory node for cpu" + str(cpu))
cores = cores-1
# the leftovers go to everyone else
logger.log("CoreSched: allocating unit " + str(cpus) + " to _default")
reservations["_default"] = cpus[:]
mem_reservations["_default"] = mems[:]
# now check and see if any of our slices had the besteffort flag
# set
for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
# if the bestEffort flag isn't set then we have nothing to do
if not bestEffort:
continue
# note that if a reservation is [], then we don't need to add
# bestEffort cores to it, since it is bestEffort by default.
if reservations.get(name, []) != []:
reservations[name] = reservations[name] + reservations["_default"]
mem_reservations[name] = mem_reservations.get(name, []) + mem_reservations["_default"]
logger.log("CoreSched: adding besteffort units to " + name + ". new units = " + str(reservations[name]))
self.reserveUnits(self.cgroup_var_name, reservations)
self.reserveUnits(self.cgroup_mem_name, mem_reservations)
def reserveUnits (self, var_name, reservations):
""" give a set of reservations (dictionary of slicename:cpuid_list),
write those reservations to the appropriate cgroup files.
reservations["_default"] is assumed to be the default reservation
for slices that do not reserve cores. It's essentially the leftover
cpu cores.
"""
default = reservations["_default"]
# set the default vserver cpuset. this will deal with any vservers
# that might be created before the nodemanager has had a chance to
# update the cpusets.
self.reserveDefault(var_name, default)
for cgroup in self.get_cgroups():
if cgroup in reservations:
cpus = reservations[cgroup]
logger.log("CoreSched: reserving " + var_name + " on " + cgroup + ": " + str(cpus))
else:
# no log message for default; too much verbosity in the common case
cpus = default
if glo_coresched_simulate:
print("R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus))
else:
                with open("/dev/cgroup/{}/{}".format(cgroup, var_name), "w") as f:
f.write( self.listToRange(cpus) + "\n" )
def reserveDefault (self, var_name, cpus):
if not os.path.exists("/etc/vservers/.defaults/cgroup"):
os.makedirs("/etc/vservers/.defaults/cgroup")
if glo_coresched_simulate:
print("RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus))
else:
with open("/etc/vservers/.defaults/cgroup/{}".format(var_name), "w") as f:
f.write( self.listToRange(cpus) + "\n" )
def listToRange (self, list):
""" take a list of items [1,2,3,5,...] and return it as a range: "1-3,5"
for now, just comma-separate
"""
return ",".join( [str(i) for i in list] )
def get_mems(self):
""" return a list of available cpu identifiers: [0,1,2,3...]
"""
# the cpus never change, so if it's already been computed then don't
# worry about it.
if self.mems!=[]:
return self.mems
self.mems = self.get_cgroup_var(self.cgroup_mem_name)
# build a mapping from memory nodes to the cpus they can be used with
mems_map={}
for item in self.mems:
mems_map[item] = self.get_memnode_cpus(item)
if (len(mems_map)>0):
# when NUMA_EMU is enabled, only the last memory node will contain
# the cpu_map. For example, if there were originally 2 nodes and
            # we used NUMA_EMU to raise it to 12, then
# mems_map[0]=[]
# ...
# mems_map[4]=[]
# mems_map[5]=[1,3,5,7,9,11]
# mems_map[6]=[]
# ...
# mems_map[10]=[]
# mems_map[11]=[0,2,4,6,8,10]
# so, we go from back to front, copying the entries as necessary.
if mems_map[self.mems[0]] == []:
work = []
for item in reversed(self.mems):
if mems_map[item] != []:
work = mems_map[item]
else: # mems_map[item]==[]
mems_map[item] = work
self.mems_map = mems_map
return self.mems
def find_associated_memnode(self, mems, cpu):
""" Given a list of memory nodes and a cpu, see if one of the nodes in
the list can be used with that cpu.
"""
for item in mems:
if cpu in self.mems_map[item]:
return item
return None
def get_memnode_cpus(self, index):
""" for a given memory node, return the CPUs that it is associated
with.
"""
fn = "/sys/devices/system/node/node" + str(index) + "/cpulist"
if not os.path.exists(fn):
logger.log("CoreSched: failed to locate memory node" + fn)
return []
return self.get_cgroup_var(filename=fn)
def get_core_siblings(self, index):
# use core_siblings rather than core_siblings_list, as it's compatible
# with older kernels
fn = "/sys/devices/system/cpu/cpu" + str(index) + "/topology/core_siblings"
if not os.path.exists(fn):
return []
siblings = []
with open(fn, "rt") as f:
x = int(f.readline().strip(), 16)
cpuid = 0
while (x>0):
if (x&1)!=0:
siblings.append(cpuid)
x = x >> 1
cpuid += 1
return siblings
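    # Illustrative example (assumed sysfs contents): a core_siblings mask of "5"
    # (0x5 = 0b101) decodes to the sibling list [0, 2].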
# a little self-test
if __name__=="__main__":
glo_coresched_simulate = True
x = CoreSched()
print("cgroups:", ",".join(x.get_cgroups()))
print("cpus:", x.listToRange(x.get_cpus()))
print("sibling map:")
for item in x.get_cpus():
print(" ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])]))
print("mems:", x.listToRange(x.get_mems()))
print("cpu to memory map:")
for item in x.get_mems():
print(" ", item, ",".join([str(y) for y in x.mems_map.get(item, [])]))
rspec_sl_test1 = {"cpu_cores": "1"}
rec_sl_test1 = {"_rspec": rspec_sl_test1}
rspec_sl_test2 = {"cpu_cores": "5"}
rec_sl_test2 = {"_rspec": rspec_sl_test2}
rspec_sl_test3 = {"cpu_cores": "3b"}
rec_sl_test3 = {"_rspec": rspec_sl_test3}
#slivers = {"sl_test1": rec_sl_test1, "sl_test2": rec_sl_test2}
slivers = {"arizona_beta": rec_sl_test1, "arizona_test101": rec_sl_test2, "pl_sirius": rec_sl_test3}
#slivers = {"arizona_beta": rec_sl_test1, "arizona_logmon": rec_sl_test2, "arizona_owl": rec_sl_test3}
x.adjustCores(slivers)
| 34.426702
| 120
| 0.556384
|
63321b5c5a67c526be01fe3981c304c28e447ad5
| 104,718
|
py
|
Python
|
dataLoader.py
|
LRussianStand/ljTransparent
|
e0bd31a1bf3c5ab6056157e8c5233689b1c41d53
|
[
"MIT"
] | 3
|
2021-06-08T04:23:13.000Z
|
2021-07-13T07:42:20.000Z
|
dataLoader.py
|
LRussianStand/ljTransparent
|
e0bd31a1bf3c5ab6056157e8c5233689b1c41d53
|
[
"MIT"
] | null | null | null |
dataLoader.py
|
LRussianStand/ljTransparent
|
e0bd31a1bf3c5ab6056157e8c5233689b1c41d53
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-  # Chinese comments may be used in this file
import glob
import numpy as np
import os.path as osp
from PIL import Image
import random
import struct
from torch.utils.data import Dataset
import os
import scipy.ndimage as ndimage
import h5py
import cv2
import xml.etree.ElementTree as et
from skimage.transform import resize
from mesh_to_sdf import mesh_to_sdf
import trimesh
# Data loading module
class BatchLoader(Dataset):
def __init__(self, dataRoot, shapeRoot = None,
imHeight = 360, imWidth = 480,
envHeight = 256, envWidth = 512,
isRandom=False, phase='TRAIN', rseed = 1,
isLoadVH = False, isLoadEnvmap = False,
isLoadCam = False, isLoadOptim = False,
camNum = 10, shapeRs = 0, shapeRe = 1500, volumeSize=32, batchSize = None, isOptim = False, ignore = [],
isLoadSDF = True, grid_res = 8, bounding_radius = 1.1):
self.dataRoot = dataRoot
self.shapeRoot = shapeRoot
self.imHeight = imHeight
self.imWidth = imWidth
self.envHeight = envHeight
self.envWidth = envWidth
self.phase = phase.upper()
self.isLoadVH = isLoadVH
self.isLoadCam = isLoadCam
self.isLoadEnvmap = isLoadEnvmap
self.isLoadOptim = isLoadOptim
self.camNum = camNum
self.shapeRs = shapeRs
self.shapeRe = shapeRe
self.isOptim = isOptim
self.isLoadSDF = isLoadSDF
self.grid_res = grid_res
self.bounding_radius = bounding_radius
if batchSize is None:
batchSize = camNum
self.batchSize = min(batchSize , 10)
else:
self.batchSize = batchSize
self.minX, self.maxX = -1.1, 1.1
self.minY, self.maxY = -1.1, 1.1
self.minZ, self.maxZ = -1.1, 1.1
self.volumeSize = volumeSize
y, x, z = np.meshgrid(
np.linspace(self.minX, self.maxX, volumeSize ),
np.linspace(self.minY, self.maxY, volumeSize ),
np.linspace(self.minZ, self.maxZ, volumeSize ) )
x = x[:, :, :, np.newaxis ]
y = y[:, :, :, np.newaxis ]
z = z[:, :, :, np.newaxis ]
coord = np.concatenate([x, y, z], axis=3 )
shapeList = glob.glob(osp.join(dataRoot, 'Shape__*') )
if isLoadCam:
self.originArr = []
self.lookatArr = []
self.upArr = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
camFileName = osp.join(shape, 'cam%d.txt' % camNum )
with open(camFileName, 'r') as camIn:
camLines = camIn.readlines()
viewNum = int(camLines[0].strip() )
origins = []
lookats = []
ups = []
for n in range(0, viewNum ):
originStr = camLines[3*n+1 ].strip().split(' ')
lookatStr = camLines[3*n+2 ].strip().split(' ')
upStr = camLines[3*n+3 ].strip().split(' ')
origin = np.array([float(x) for x in originStr ])[np.newaxis, :]
lookat = np.array([float(x) for x in lookatStr ])[np.newaxis, :]
up = np.array([float(x) for x in upStr])[np.newaxis, :]
origins.append(origin.astype(np.float32 ) )
lookats.append(lookat.astype(np.float32 ) )
ups.append(up.astype(np.float32 ) )
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
self.originArr.append(origins )
self.lookatArr.append(lookats )
self.upArr.append(ups )
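            # The cam<N>.txt files parsed above are expected to follow this layout
            # (first line = number of views, then an origin / lookat / up line per
            # view, whitespace-separated floats; the numbers below are made up):
            #   2
            #   0.0 0.0 3.0
            #   0.0 0.0 0.0
            #   0.0 1.0 0.0
            #   3.0 0.0 0.0
            #   0.0 0.0 0.0
            #   0.0 1.0 0.0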
if isLoadEnvmap:
self.envList = []
self.scaleList = []
envListUnique = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
xmlFile = osp.join(shape, 'im.xml')
# Create rendering file for Depth maps
tree = et.parse(xmlFile )
root = tree.getroot()
shapes = root.findall('emitter')
assert(len(shapes ) == 1 )
for shape in shapes:
strings = shape.findall('string')
assert(len(strings) == 1 )
for st in strings:
envFileName = st.get('value')
envFileName = envFileName.replace('/home/zhl/CVPR20/TransparentShape','../')
if not osp.isfile(envFileName):
print(shapeList[n])
if not envFileName.find('1640')==-1:
print(shapeList[n])
floats = shape.findall('float')
assert(len(floats) == 1 )
for f in floats:
scale = float(f.get('value') )
self.envList.append(envFileName )
self.scaleList.append(scale )
if envFileName not in envListUnique:
envListUnique.append(envFileName )
print("Number of environment maps %d" % (len(envListUnique ) ) )
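        # The im.xml parsed above is assumed to contain exactly one emitter of the form
        #   <emitter ...>
        #       <string ... value="/path/to/envmap.hdr"/>
        #       <float ... value="1.0"/>
        #   </emitter>
        # where the string gives the environment map path and the float its scale.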
self.imList = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(dataRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
imNames = sorted(glob.glob(osp.join(shape, 'im_*.rgbe' ) ) )
if isRandom:
random.shuffle(imNames )
if len(imNames ) < camNum:
print('%s: %d' % (shape, len(imNames) ) )
assert(False )
self.imList.append(imNames[0:camNum ] )
if rseed is not None:
random.seed(rseed)
# Permute the image list
self.count = len(self.imList)
self.perm = list(range(self.count ) )
if isRandom:
random.shuffle(self.perm)
print("batchloader init done!:envlist:",self.envList,"imList:",self.imList,"scaleList",self.scaleList)
def __len__(self):
return len(self.perm)
def __getitem__(self, ind):
# normalize the normal vector so that it will be unit length
imNames = self.imList[self.perm[ind ] ]
if self.batchSize < self.camNum:
random.shuffle(imNames )
            # When batchSize is camNum - 1, the last image is replaced by the normal prediction for camNum views
if self.isOptim:
count = 0
imNamesNew = []
for n in range(0, self.camNum ):
isAdd = False
imName = imNames[n]
if n == self.camNum-2:
isAdd = True
else:
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) ).replace('.rgbe', '.npy')
if not osp.isfile(twoNormalName ):
isAdd = True
if isAdd == True:
imNamesNew.append(imName )
count += 1
if count == self.batchSize:
break
imNames = imNamesNew
else:
imNames = imNames[0:self.batchSize ]
segs = []
seg2s = []
normals = []
normal2s = []
depths = []
depth2s = []
ims = []
imEs = []
origins = []
lookats = []
ups = []
envs = []
segVHs = []
seg2VHs = []
normalVHs = []
normal2VHs = []
depthVHs = []
depth2VHs = []
normalOpts = []
normal2Opts = []
imScale = None
for imName in imNames:
twoBounceName = imName.replace('im_', 'imtwoBounce_').replace('.rgbe', '.npy')
if not osp.isfile(twoBounceName ):
twoBounceName = imName.replace('im_', 'imtwoBounce_').replace('.rgbe', '.h5')
hf = h5py.File(twoBounceName, 'r')
twoBounce = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoBounce = np.load(twoBounceName )
if twoBounce.shape[0] != self.imWidth or twoBounce.shape[1] != self.imHeight:
newTwoBounce1 = cv2.resize(twoBounce[:, :, 0:3], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce2 = cv2.resize(twoBounce[:, :, 3:6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce3 = cv2.resize(twoBounce[:, :, 6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce4 = cv2.resize(twoBounce[:, :, 7:10], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce5 = cv2.resize(twoBounce[:, :, 10:13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce6 = cv2.resize(twoBounce[:, :, 13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
twoBounce = np.concatenate((newTwoBounce1, newTwoBounce2, newTwoBounce3[:, :, np.newaxis],
newTwoBounce4, newTwoBounce5, newTwoBounce6[:, :, np.newaxis] ), axis=2)
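            # Channel layout of twoBounce, as implied by the slicing below:
            #   0:3  normal1    3:6   depth1 (3 channels)   6   seg1 mask
            #   7:10 normal2    10:13 depth2 (3 channels)   13  seg2 mask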
normal = twoBounce[:, :, 0:3].transpose([2, 0, 1] )
normal = np.ascontiguousarray(normal )
seg = twoBounce[:, :, 6:7].transpose([2, 0, 1] ) > 0.9
seg = np.ascontiguousarray(seg.astype(np.float32) )
seg = seg[:,:,::-1]
depth = twoBounce[:, :, 3:6].transpose([2, 0, 1] )
depth = np.ascontiguousarray(depth )
depth = depth * seg
normal2 = twoBounce[:, :, 7:10].transpose([2, 0, 1] )
normal2 = np.ascontiguousarray(normal2 )
seg2 = twoBounce[:, :, 13:14].transpose([2, 0, 1] ) > 0.9
seg2 = np.ascontiguousarray(seg2.astype(np.float32) )
depth2 = twoBounce[:, :, 10:13].transpose([2, 0, 1] )
depth2 = np.ascontiguousarray(depth2 )
depth2 = depth2 * seg
normal = normal / np.sqrt(np.maximum(np.sum(normal * normal, axis=0), 1e-10) )[np.newaxis, :]
normal = normal * seg
normal2 = normal2 / np.sqrt(np.maximum(np.sum(normal2 * normal2, axis=0), 1e-10) )[np.newaxis, :]
normal2 = normal2 * seg
            # Read rendered images (pixel values are compressed to the 0~1 range)
imE, imScale = self.loadHDR(imName, imScale )
imE = imE[:,:,::-1]
im = imE * seg
imId = int(imName.split('/')[-1].split('.')[0].split('_')[-1] )
shapeId = int(imName.split('/')[-2].split('_')[-1] ) - self.shapeRs
segs.append(seg[np.newaxis, :] )
seg2s.append(seg2[np.newaxis, :] )
normals.append(normal[np.newaxis, :] )
normal2s.append(normal2[np.newaxis, :] )
depths.append(depth[np.newaxis, :] )
depth2s.append(depth2[np.newaxis, :] )
ims.append(im[np.newaxis, :] )
imEs.append(imE[np.newaxis, :] )
# Load the rendering file
if self.isLoadCam:
origin = self.originArr[shapeId ][imId-1 ]
lookat = self.lookatArr[shapeId ][imId-1 ]
up = self.upArr[shapeId ][imId-1 ]
origins.append(origin[np.newaxis, :] )
lookats.append(lookat[np.newaxis, :] )
ups.append(up[np.newaxis, :] )
if self.isLoadEnvmap:
envFileName = self.envList[shapeId ]
scale = self.scaleList[shapeId ]
env = cv2.imread(envFileName, -1)
if env is None:
print(envFileName)
env = env[:, :, ::-1]
env = cv2.resize(env, (self.envWidth, self.envHeight ), interpolation=cv2.INTER_LINEAR)
env = np.ascontiguousarray(env )
env = env.transpose([2, 0, 1]) * imScale * scale
envs.append(env[np.newaxis, :] )
if self.isLoadVH:
twoBounceVHName = imName.replace('im_', 'imVH_%dtwoBounce_' % self.camNum ).replace('.rgbe', '.npy')
if not osp.isfile(twoBounceVHName ):
twoBounceVHName = imName.replace('im_', 'imVH_%dtwoBounce_' % self.camNum ).replace('.rgbe', '.h5')
hf = h5py.File(twoBounceVHName, 'r')
twoBounceVH = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoBounceVH = np.load(twoBounceVHName )
if twoBounceVH.shape[0] != self.imWidth or twoBounceVH.shape[1] != self.imHeight:
newTwoBounce1 = cv2.resize(twoBounceVH[:, :, 0:3], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce2 = cv2.resize(twoBounceVH[:, :, 3:6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce3 = cv2.resize(twoBounceVH[:, :, 6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce4 = cv2.resize(twoBounceVH[:, :, 7:10], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce5 = cv2.resize(twoBounceVH[:, :, 10:13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce6 = cv2.resize(twoBounceVH[:, :, 13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
twoBounceVH = np.concatenate((newTwoBounce1, newTwoBounce2, newTwoBounce3[:, :, np.newaxis],
newTwoBounce4, newTwoBounce5, newTwoBounce6[:, :, np.newaxis] ), axis=2)
normalVH = twoBounceVH[:, :, 0:3].transpose([2, 0, 1])
normalVH = np.ascontiguousarray(normalVH )
segVH = twoBounceVH[:, :, 6:7].transpose([2, 0, 1] ) > 0.9
segVH = np.ascontiguousarray(segVH.astype(np.float32) )
depthVH = twoBounceVH[:, :, 3:6].transpose([2, 0, 1])
depthVH = np.ascontiguousarray(depthVH )
depthVH = depthVH * segVH
normal2VH = twoBounceVH[:, :, 7:10].transpose([2, 0, 1])
normal2VH = np.ascontiguousarray(normal2VH )
seg2VH = twoBounceVH[:, :, 13:14].transpose([2, 0, 1] ) > 0.9
seg2VH = np.ascontiguousarray(seg2VH.astype(np.float32) )
depth2VH = twoBounceVH[:, :, 10:13].transpose([2, 0, 1])
depth2VH = np.ascontiguousarray(depth2VH )
depth2VH = depth2VH * segVH
normalVH = normalVH / np.sqrt(np.maximum(np.sum(normalVH * normalVH, axis=0), 1e-10) )[np.newaxis, :]
normalVH = normalVH * segVH
normal2VH = normal2VH / np.sqrt(np.maximum(np.sum(normal2VH * normal2VH, axis=0), 1e-10) )[np.newaxis, :]
normal2VH = normal2VH * segVH
segVHs.append(segVH[np.newaxis, :] )
seg2VHs.append(seg2VH[np.newaxis, :] )
normalVHs.append(normalVH[np.newaxis, :] )
normal2VHs.append(normal2VH[np.newaxis, :] )
depthVHs.append(depthVH[np.newaxis, :] )
depth2VHs.append(depth2VH[np.newaxis, :] )
if self.isLoadOptim:
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) ).replace('.rgbe', '.npy')
if not osp.isfile(twoNormalName ):
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) ).replace('.rgbe', '.h5')
hf = h5py.File(twoNormalName, 'r')
twoNormals = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoNormals = np.load(twoNormalName )
normalOpt, normal2Opt = twoNormals[:, :, 0:3], twoNormals[:, :, 3:6]
normalOpt = cv2.resize(normalOpt, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
normal2Opt = cv2.resize(normal2Opt, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
normalOpt = np.ascontiguousarray(normalOpt.transpose([2, 0, 1] ) )
normal2Opt = np.ascontiguousarray(normal2Opt.transpose([2, 0, 1] ) )
normalOpt = normalOpt / np.sqrt(np.maximum(np.sum(normalOpt * normalOpt, axis=0), 1e-10) )[np.newaxis, :]
normalOpt = normalOpt * seg
normal2Opt = normal2Opt / np.sqrt(np.maximum(np.sum(normal2Opt * normal2Opt, axis=0), 1e-10) )[np.newaxis, :]
normal2Opt = normal2Opt * seg
normalOpts.append(normalOpt[np.newaxis, :] )
normal2Opts.append(normal2Opt[np.newaxis, :] )
segs = np.concatenate(segs, axis=0 )
seg2s = np.concatenate(seg2s, axis=0 )
normals = np.concatenate(normals, axis=0 )
normal2s = np.concatenate(normal2s, axis=0 )
depths = np.concatenate(depths, axis=0 )
depth2s = np.concatenate(depth2s, axis=0 )
ims = np.concatenate(ims, axis=0 )
imEs = np.concatenate(imEs, axis=0 )
batchDict = {'seg1': segs,
'seg2': seg2s,
'normal1': normals,
'normal2': normal2s,
'depth1': depths,
'depth2': depth2s,
'im': ims,
'imE': imEs,
'name': imNames }
if self.isLoadCam:
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
batchDict['origin'] = origins
batchDict['lookat'] = lookats
batchDict['up'] = ups
if self.isLoadEnvmap:
envs = np.concatenate(envs, axis=0 )
batchDict['env'] = envs
if self.isLoadVH:
segVHs = np.concatenate(segVHs, axis=0 )
seg2VHs = np.concatenate(seg2VHs, axis=0 )
normalVHs = np.concatenate(normalVHs, axis=0 )
normal2VHs = np.concatenate(normal2VHs, axis=0 )
depthVHs = np.concatenate(depthVHs, axis=0 )
depth2VHs = np.concatenate(depth2VHs, axis=0 )
batchDict['seg1VH'] = segVHs
batchDict['seg2VH'] = seg2VHs
batchDict['normal1VH'] = normalVHs
batchDict['normal2VH'] = normal2VHs
batchDict['depth1VH'] = depthVHs
batchDict['depth2VH'] = depth2VHs
if self.isLoadOptim:
normalOpts = np.concatenate(normalOpts, axis=0 )
normal2Opts = np.concatenate(normal2Opts, axis=0 )
batchDict['normalOpt'] = normalOpts
batchDict['normal2Opt'] = normal2Opts
        # Load the SDF file
if self.isLoadSDF:
imName = imNames[0]
shapeId = imName.split('/')[-2]
shapePath = osp.join(self.shapeRoot, shapeId)
sdfName = osp.join(shapePath, 'visualHullSubd_%d_%d_sdf.npy' % (self.camNum,self.grid_res))
batchDict['shape_path'] = shapePath
if osp.isfile(sdfName):
                batchDict['grid'] = np.load(sdfName).astype(np.float64)
else:
VHName = osp.join(shapePath, 'visualHullSubd_%d.ply' % self.camNum)
mesh = trimesh.load(VHName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, self.grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z),axis=3)
query_points = coords.reshape((-1,3))
sdfs = mesh_to_sdf(mesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=11)
sdfs = np.reshape(sdfs, grid_x.shape).transpose((1,0,2))
batchDict['grid'] = sdfs
np.save(sdfName,sdfs)
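                # Note: np.meshgrid defaults to 'xy' indexing, so grid_x/grid_y/grid_z
                # are indexed as [y, x, z]; the transpose((1, 0, 2)) above rearranges
                # the SDF volume back to [x, y, z] ordering before it is cached to disk.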
gt_sdfName = osp.join(shapePath, 'object_sdf_%d.npy'%(self.grid_res))
if osp.isfile(gt_sdfName):
                batchDict['gt_grid'] = np.load(gt_sdfName).astype(np.float64)
else:
#gtName = osp.join(shapePath, 'meshGT_transform.ply')
gtName = osp.join(shapePath, 'object.obj')
gtmesh = trimesh.load(gtName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, self.grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z), axis=3)
query_points = coords.reshape((-1, 3))
gtsdfs = mesh_to_sdf(gtmesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=20)
gtsdfs = np.reshape(gtsdfs, grid_x.shape).transpose((1, 0, 2))
batchDict['gt_grid'] = gtsdfs
np.save(gt_sdfName, gtsdfs)
return batchDict
def loadHDR(self, imName, scale):
if not osp.isfile(imName ):
print('Error: %s does not exist.' % imName )
assert(False )
image = cv2.imread(imName, -1 )[:, :, ::-1]
image = cv2.resize(image, (self.imWidth, self.imHeight ), interpolation=cv2.INTER_LINEAR)
image = np.ascontiguousarray(image )
imMean = np.mean(image )
if scale is None:
if self.phase == 'TRAIN':
scale = (np.random.random() * 0.2 + 0.4) / imMean
else:
scale = 0.5 / imMean
image = (image*scale).transpose([2, 0, 1] )
#image = np.clip((image * scale), 0, 1).transpose([2, 0, 1])
return image, scale
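        # Example of the scaling above: if the resized HDR image has mean 0.25, the
        # returned scale is 0.5 / 0.25 = 2.0 in the TEST phase and a random value in
        # [1.6, 2.4] during TRAIN, so the scaled image has mean roughly 0.4-0.6.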
def loadImage(self, imName, isGama = False):
if not os.path.isfile(imName):
print('Fail to load {0}'.format(imName) )
im = np.zeros([3, self.imSize, self.imSize], dtype=np.float32)
return im
im = Image.open(imName)
im = self.imResize(im)
im = np.asarray(im, dtype=np.float32)
if isGama:
im = (im / 255.0) ** 2.2
im = 2 * im - 1
else:
im = (im - 127.5) / 127.5
if len(im.shape) == 2:
im = im[:, np.newaxis]
im = np.transpose(im, [2, 0, 1])
return im
def imResize(self, im):
w0, h0 = im.size
if w0 != self.imHeight or h0 != self.imWidth:
            im = im.resize( (self.imWidth, self.imHeight ), Image.LANCZOS)
return im
#-------------------------------------------------------------------------------------------------------
class BatchLoaderReal2(Dataset):
def __init__(self, dataRoot, shapeRoot = None,
imHeight = 360, imWidth = 480,
envHeight = 256, envWidth = 512,
isRandom=False, phase='TRAIN', rseed = 1,
isLoadVH = False, isLoadEnvmap = False,
isLoadCam = False, isLoadOptim = False,
camNum = 10, shapeRs = 0, shapeRe = 1500, volumeSize=32, batchSize = None, isOptim = False, ignore = [],
isLoadSDF = True, grid_res = 8, bounding_radius = 1.1):
self.dataRoot = dataRoot
self.shapeRoot = shapeRoot
self.imHeight = imHeight
self.imWidth = imWidth
self.envHeight = envHeight
self.envWidth = envWidth
self.phase = phase.upper()
self.isLoadVH = isLoadVH
self.isLoadCam = isLoadCam
self.isLoadEnvmap = isLoadEnvmap
self.isLoadOptim = isLoadOptim
self.camNum = camNum
self.shapeRs = shapeRs
self.shapeRe = shapeRe
self.isOptim = isOptim
self.isLoadSDF = isLoadSDF
self.grid_res = grid_res
self.bounding_radius = bounding_radius
if batchSize is None:
batchSize = camNum
self.batchSize = min(batchSize , 10)
else:
self.batchSize = batchSize
self.minX, self.maxX = -1.1, 1.1
self.minY, self.maxY = -1.1, 1.1
self.minZ, self.maxZ = -1.1, 1.1
self.volumeSize = volumeSize
y, x, z = np.meshgrid(
np.linspace(self.minX, self.maxX, volumeSize ),
np.linspace(self.minY, self.maxY, volumeSize ),
np.linspace(self.minZ, self.maxZ, volumeSize ) )
x = x[:, :, :, np.newaxis ]
y = y[:, :, :, np.newaxis ]
z = z[:, :, :, np.newaxis ]
coord = np.concatenate([x, y, z], axis=3 )
shapeList = glob.glob(osp.join(dataRoot, 'Shape__*') )
if isLoadCam:
self.originArr = []
self.lookatArr = []
self.upArr = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
camFileName = osp.join(shape, 'cam%d.txt' % camNum )
with open(camFileName, 'r') as camIn:
camLines = camIn.readlines()
viewNum = int(camLines[0].strip() )
origins = []
lookats = []
ups = []
for n in range(0, viewNum ):
originStr = camLines[3*n+1 ].strip().split(' ')
lookatStr = camLines[3*n+2 ].strip().split(' ')
upStr = camLines[3*n+3 ].strip().split(' ')
origin = np.array([float(x) for x in originStr ])[np.newaxis, :]
lookat = np.array([float(x) for x in lookatStr ])[np.newaxis, :]
up = np.array([float(x) for x in upStr])[np.newaxis, :]
origins.append(origin.astype(np.float32 ) )
lookats.append(lookat.astype(np.float32 ) )
ups.append(up.astype(np.float32 ) )
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
self.originArr.append(origins )
self.lookatArr.append(lookats )
self.upArr.append(ups )
if isLoadEnvmap:
self.envList = []
self.scaleList = []
envListUnique = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
xmlFile = osp.join(shape, 'im.xml')
# Create rendering file for Depth maps
tree = et.parse(xmlFile )
root = tree.getroot()
shapes = root.findall('emitter')
assert(len(shapes ) == 1 )
for shape in shapes:
strings = shape.findall('string')
assert(len(strings) == 1 )
for st in strings:
envFileName = st.get('value')
envFileName = envFileName.replace('/home/zhl/CVPR20/TransparentShape','../')
if not osp.isfile(envFileName):
print(shapeList[n])
if not envFileName.find('1640')==-1:
print(shapeList[n])
floats = shape.findall('float')
assert(len(floats) == 1 )
for f in floats:
scale = float(f.get('value') )
self.envList.append(envFileName )
self.scaleList.append(scale )
if envFileName not in envListUnique:
envListUnique.append(envFileName )
print("Number of environment maps %d" % (len(envListUnique ) ) )
self.imList = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(dataRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
imNames = sorted(glob.glob(osp.join(shape, 'im_*.png' ) ) )
if isRandom:
random.shuffle(imNames )
if len(imNames ) < camNum:
print('%s: %d' % (shape, len(imNames) ) )
assert(False )
self.imList.append(imNames[0:camNum ] )
if rseed is not None:
random.seed(rseed)
# Permute the image list
self.count = len(self.imList)
self.perm = list(range(self.count ) )
if isRandom:
random.shuffle(self.perm)
def __len__(self):
return len(self.perm)
def __getitem__(self, ind):
# normalize the normal vector so that it will be unit length
imNames = self.imList[self.perm[ind ] ]
if self.batchSize < self.camNum:
random.shuffle(imNames )
            # When batchSize is camNum - 1, the last image is replaced by the normal prediction for camNum views
if self.isOptim:
count = 0
imNamesNew = []
for n in range(0, self.camNum ):
isAdd = False
imName = imNames[n]
if n == self.camNum-2:
isAdd = True
else:
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) ).replace('.png', '.npy')
if not osp.isfile(twoNormalName ):
isAdd = True
if isAdd == True:
imNamesNew.append(imName )
count += 1
if count == self.batchSize:
break
imNames = imNamesNew
else:
imNames = imNames[0:self.batchSize ]
segs = []
seg2s = []
normals = []
normal2s = []
depths = []
depth2s = []
ims = []
imEs = []
origins = []
lookats = []
ups = []
envs = []
segVHs = []
seg2VHs = []
normalVHs = []
normal2VHs = []
depthVHs = []
depth2VHs = []
normalOpts = []
normal2Opts = []
imScale = None
for imName in imNames:
twoBounceName = imName.replace('im_', 'imVH_twoBounce_').replace('.png', '.npy')
if not osp.isfile(twoBounceName ):
twoBounceName = imName.replace('im_', 'imVH_twoBounce_').replace('.png', '.h5')
hf = h5py.File(twoBounceName, 'r')
twoBounce = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoBounce = np.load(twoBounceName )
if twoBounce.shape[0] != self.imWidth or twoBounce.shape[1] != self.imHeight:
newTwoBounce1 = cv2.resize(twoBounce[:, :, 0:3], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce2 = cv2.resize(twoBounce[:, :, 3:6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce3 = cv2.resize(twoBounce[:, :, 6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce4 = cv2.resize(twoBounce[:, :, 7:10], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce5 = cv2.resize(twoBounce[:, :, 10:13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce6 = cv2.resize(twoBounce[:, :, 13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
twoBounce = np.concatenate((newTwoBounce1, newTwoBounce2, newTwoBounce3[:, :, np.newaxis],
newTwoBounce4, newTwoBounce5, newTwoBounce6[:, :, np.newaxis] ), axis=2)
normal = twoBounce[:, :, 0:3].transpose([2, 0, 1] )
normal = np.ascontiguousarray(normal )
seg = twoBounce[:, :, 6:7].transpose([2, 0, 1] ) > 0.9
seg = np.ascontiguousarray(seg.astype(np.float32) )
seg = seg[:,:,::-1]
depth = twoBounce[:, :, 3:6].transpose([2, 0, 1] )
depth = np.ascontiguousarray(depth )
depth = depth * seg
normal2 = twoBounce[:, :, 7:10].transpose([2, 0, 1] )
normal2 = np.ascontiguousarray(normal2 )
seg2 = twoBounce[:, :, 13:14].transpose([2, 0, 1] ) > 0.9
seg2 = np.ascontiguousarray(seg2.astype(np.float32) )
depth2 = twoBounce[:, :, 10:13].transpose([2, 0, 1] )
depth2 = np.ascontiguousarray(depth2 )
depth2 = depth2 * seg
normal = normal / np.sqrt(np.maximum(np.sum(normal * normal, axis=0), 1e-10) )[np.newaxis, :]
normal = normal * seg
normal2 = normal2 / np.sqrt(np.maximum(np.sum(normal2 * normal2, axis=0), 1e-10) )[np.newaxis, :]
normal2 = normal2 * seg
            # Read rendered images (pixel values are compressed to the 0~1 range)
imE, imScale = self.loadHDR(imName, imScale )
imE = imE[:,:,::-1]
im = imE * seg
imId = int(imName.split('/')[-1].split('.')[0].split('_')[-1] )
shapeId = int(imName.split('/')[-2].split('_')[-1] ) - self.shapeRs
segs.append(seg[np.newaxis, :] )
seg2s.append(seg2[np.newaxis, :] )
normals.append(normal[np.newaxis, :] )
normal2s.append(normal2[np.newaxis, :] )
depths.append(depth[np.newaxis, :] )
depth2s.append(depth2[np.newaxis, :] )
ims.append(im[np.newaxis, :] )
imEs.append(imE[np.newaxis, :] )
# Load the rendering file
if self.isLoadCam:
origin = self.originArr[shapeId ][imId-1 ]
lookat = self.lookatArr[shapeId ][imId-1 ]
up = self.upArr[shapeId ][imId-1 ]
origins.append(origin[np.newaxis, :] )
lookats.append(lookat[np.newaxis, :] )
ups.append(up[np.newaxis, :] )
if self.isLoadEnvmap:
envFileName = self.envList[shapeId ]
scale = self.scaleList[shapeId ]
                env = cv2.imread(envFileName, -1)
                if env is None:
                    print(envFileName)
                env = cv2.cvtColor(env, cv2.COLOR_BGRA2BGR)
env = env[:, :, ::-1]
env = cv2.resize(env, (self.envWidth, self.envHeight ), interpolation=cv2.INTER_LINEAR)
env = np.ascontiguousarray(env )
env = env.transpose([2, 0, 1]) * imScale * scale
envs.append(env[np.newaxis, :] )
if self.isLoadVH:
twoBounceVHName = imName.replace('im_', 'imVH_twoBounce_').replace('.png', '.npy')
if not osp.isfile(twoBounceVHName ):
twoBounceVHName = imName.replace('im_', 'imVH_twoBounce_').replace('.png', '.h5')
hf = h5py.File(twoBounceVHName, 'r')
twoBounceVH = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoBounceVH = np.load(twoBounceVHName )
if twoBounceVH.shape[0] != self.imWidth or twoBounceVH.shape[1] != self.imHeight:
newTwoBounce1 = cv2.resize(twoBounceVH[:, :, 0:3], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce2 = cv2.resize(twoBounceVH[:, :, 3:6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce3 = cv2.resize(twoBounceVH[:, :, 6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce4 = cv2.resize(twoBounceVH[:, :, 7:10], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce5 = cv2.resize(twoBounceVH[:, :, 10:13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce6 = cv2.resize(twoBounceVH[:, :, 13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
twoBounceVH = np.concatenate((newTwoBounce1, newTwoBounce2, newTwoBounce3[:, :, np.newaxis],
newTwoBounce4, newTwoBounce5, newTwoBounce6[:, :, np.newaxis] ), axis=2)
normalVH = twoBounceVH[:, :, 0:3].transpose([2, 0, 1])
normalVH = np.ascontiguousarray(normalVH )
segVH = twoBounceVH[:, :, 6:7].transpose([2, 0, 1] ) > 0.9
segVH = np.ascontiguousarray(segVH.astype(np.float32) )
depthVH = twoBounceVH[:, :, 3:6].transpose([2, 0, 1])
depthVH = np.ascontiguousarray(depthVH )
depthVH = depthVH * segVH
normal2VH = twoBounceVH[:, :, 7:10].transpose([2, 0, 1])
normal2VH = np.ascontiguousarray(normal2VH )
seg2VH = twoBounceVH[:, :, 13:14].transpose([2, 0, 1] ) > 0.9
seg2VH = np.ascontiguousarray(seg2VH.astype(np.float32) )
depth2VH = twoBounceVH[:, :, 10:13].transpose([2, 0, 1])
depth2VH = np.ascontiguousarray(depth2VH )
depth2VH = depth2VH * segVH
normalVH = normalVH / np.sqrt(np.maximum(np.sum(normalVH * normalVH, axis=0), 1e-10) )[np.newaxis, :]
normalVH = normalVH * segVH
normal2VH = normal2VH / np.sqrt(np.maximum(np.sum(normal2VH * normal2VH, axis=0), 1e-10) )[np.newaxis, :]
normal2VH = normal2VH * segVH
segVHs.append(segVH[np.newaxis, :] )
seg2VHs.append(seg2VH[np.newaxis, :] )
normalVHs.append(normalVH[np.newaxis, :] )
normal2VHs.append(normal2VH[np.newaxis, :] )
depthVHs.append(depthVH[np.newaxis, :] )
depth2VHs.append(depth2VH[np.newaxis, :] )
if self.isLoadOptim:
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) ).replace('.png', '.npy')
if not osp.isfile(twoNormalName ):
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) ).replace('.png', '.h5')
hf = h5py.File(twoNormalName, 'r')
twoNormals = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoNormals = np.load(twoNormalName )
normalOpt, normal2Opt = twoNormals[:, :, 0:3], twoNormals[:, :, 3:6]
normalOpt = cv2.resize(normalOpt, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
normal2Opt = cv2.resize(normal2Opt, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
normalOpt = np.ascontiguousarray(normalOpt.transpose([2, 0, 1] ) )
normal2Opt = np.ascontiguousarray(normal2Opt.transpose([2, 0, 1] ) )
normalOpt = normalOpt / np.sqrt(np.maximum(np.sum(normalOpt * normalOpt, axis=0), 1e-10) )[np.newaxis, :]
normalOpt = normalOpt * seg
normal2Opt = normal2Opt / np.sqrt(np.maximum(np.sum(normal2Opt * normal2Opt, axis=0), 1e-10) )[np.newaxis, :]
normal2Opt = normal2Opt * seg
normalOpts.append(normalOpt[np.newaxis, :] )
normal2Opts.append(normal2Opt[np.newaxis, :] )
segs = np.concatenate(segs, axis=0 )
seg2s = np.concatenate(seg2s, axis=0 )
normals = np.concatenate(normals, axis=0 )
normal2s = np.concatenate(normal2s, axis=0 )
depths = np.concatenate(depths, axis=0 )
depth2s = np.concatenate(depth2s, axis=0 )
ims = np.concatenate(ims, axis=0 )
imEs = np.concatenate(imEs, axis=0 )
batchDict = {'seg1': segs,
'seg2': seg2s,
'normal1': normals,
'normal2': normal2s,
'depth1': depths,
'depth2': depth2s,
'im': ims,
'imE': imEs,
'name': imNames }
if self.isLoadCam:
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
batchDict['origin'] = origins
batchDict['lookat'] = lookats
batchDict['up'] = ups
if self.isLoadEnvmap:
envs = np.concatenate(envs, axis=0 )
batchDict['env'] = envs
if self.isLoadVH:
segVHs = np.concatenate(segVHs, axis=0 )
seg2VHs = np.concatenate(seg2VHs, axis=0 )
normalVHs = np.concatenate(normalVHs, axis=0 )
normal2VHs = np.concatenate(normal2VHs, axis=0 )
depthVHs = np.concatenate(depthVHs, axis=0 )
depth2VHs = np.concatenate(depth2VHs, axis=0 )
batchDict['seg1VH'] = segVHs
batchDict['seg2VH'] = seg2VHs
batchDict['normal1VH'] = normalVHs
batchDict['normal2VH'] = normal2VHs
batchDict['depth1VH'] = depthVHs
batchDict['depth2VH'] = depth2VHs
if self.isLoadOptim:
normalOpts = np.concatenate(normalOpts, axis=0 )
normal2Opts = np.concatenate(normal2Opts, axis=0 )
batchDict['normalOpt'] = normalOpts
batchDict['normal2Opt'] = normal2Opts
        # Load the SDF file
if self.isLoadSDF:
imName = imNames[0]
shapeId = imName.split('/')[-2]
shapePath = osp.join(self.shapeRoot, shapeId)
sdfName = osp.join(shapePath, 'visualHullSubd_%d_%d_sdf.npy' % (self.camNum,self.grid_res))
batchDict['shape_path'] = shapePath
if osp.isfile(sdfName):
                batchDict['grid'] = np.load(sdfName).astype(np.float64)
else:
VHName = osp.join(shapePath, 'visualHullSubd_%d.ply' % self.camNum)
mesh = trimesh.load(VHName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, self.grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z),axis=3)
query_points = coords.reshape((-1,3))
sdfs = mesh_to_sdf(mesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=11)
sdfs = np.reshape(sdfs, grid_x.shape).transpose((1,0,2))
batchDict['grid'] = sdfs
np.save(sdfName,sdfs)
gt_sdfName = osp.join(shapePath, 'object_sdf_%d.npy'%(self.grid_res))
if osp.isfile(gt_sdfName):
                batchDict['gt_grid'] = np.load(gt_sdfName).astype(np.float64)
else:
gtName = osp.join(shapePath, 'meshGT_transform.ply')
gtmesh = trimesh.load(gtName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, self.grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z), axis=3)
query_points = coords.reshape((-1, 3))
gtsdfs = mesh_to_sdf(gtmesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=11)
gtsdfs = np.reshape(gtsdfs, grid_x.shape).transpose((1, 0, 2))
batchDict['gt_grid'] = gtsdfs
np.save(gt_sdfName, gtsdfs)
return batchDict
def loadHDR(self, imName, scale):
if not osp.isfile(imName ):
print('Error: %s does not exist.' % imName )
assert(False )
image = cv2.imread(imName, -1 )[:, :, ::-1]
image = cv2.resize(image, (self.imWidth, self.imHeight ), interpolation=cv2.INTER_LINEAR)
image = np.ascontiguousarray(image )
imMean = np.mean(image )
if scale is None:
if self.phase == 'TRAIN':
scale = (np.random.random() * 0.2 + 0.4) / imMean
else:
scale = 0.5 / imMean
image = (image*scale).transpose([2, 0, 1] )
#image = np.clip((image * scale), 0, 1).transpose([2, 0, 1])
return image, scale
def loadImage(self, imName, isGama = False):
if not os.path.isfile(imName):
print('Fail to load {0}'.format(imName) )
im = np.zeros([3, self.imSize, self.imSize], dtype=np.float32)
return im
im = Image.open(imName)
im = self.imResize(im)
im = np.asarray(im, dtype=np.float32)
if isGama:
im = (im / 255.0) ** 2.2
im = 2 * im - 1
else:
im = (im - 127.5) / 127.5
if len(im.shape) == 2:
im = im[:, np.newaxis]
im = np.transpose(im, [2, 0, 1])
return im
def imResize(self, im):
w0, h0 = im.size
if w0 != self.imHeight or h0 != self.imWidth:
            im = im.resize( (self.imWidth, self.imHeight ), Image.LANCZOS)
return im
#------------------------------------------------------------------------------------
class BatchLoaderReal(Dataset):
def __init__(self, dataRoot, shapeRoot = None,
imHeight = 360, imWidth = 480,
envHeight = 256, envWidth = 512,
isRandom=True, phase='TRAIN', rseed = 1,
isLoadVH = False, isLoadEnvmap = False,
isLoadCam = False, isLoadOptim = False,
camNum = 10, shapeRs = 0, shapeRe = 1500, volumeSize=32, batchSize = None, isOptim = False,
ignore = [],isLoadSDF = True, grid_res = 8, bounding_radius = 1.1,):
self.dataRoot = dataRoot
self.shapeRoot = shapeRoot
self.imHeight = imHeight
self.imWidth = imWidth
self.envHeight = envHeight
self.envWidth = envWidth
self.phase = phase.upper()
self.isLoadVH = isLoadVH
self.isLoadCam = isLoadCam
self.isLoadEnvmap = isLoadEnvmap
self.isLoadOptim = isLoadOptim
#self.camNum = camNum
self.shapeRs = shapeRs
self.shapeRe = shapeRe
self.isOptim = isOptim
self.isLoadSDF = isLoadSDF
self.grid_res = grid_res
self.bounding_radius = bounding_radius
if batchSize is None:
batchSize = camNum
self.batchSize = min(batchSize , 10)
else:
self.batchSize = batchSize
self.minX, self.maxX = -1.1, 1.1
self.minY, self.maxY = -1.1, 1.1
self.minZ, self.maxZ = -1.1, 1.1
self.volumeSize = volumeSize
y, x, z = np.meshgrid(
np.linspace(self.minX, self.maxX, volumeSize ),
np.linspace(self.minY, self.maxY, volumeSize ),
np.linspace(self.minZ, self.maxZ, volumeSize ) )
x = x[:, :, :, np.newaxis ]
y = y[:, :, :, np.newaxis ]
z = z[:, :, :, np.newaxis ]
coord = np.concatenate([x, y, z], axis=3 )
shapeList = sorted(glob.glob(osp.join(dataRoot, 'Shape__*') ))
self.camNumList = []
if isLoadCam:
self.originArr = []
self.lookatArr = []
self.upArr = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
print(shape)
camNum = int(glob.glob(osp.join(shape, 'visualHullSubd*.ply'))[0].split('_')[-1].split('.')[0])
self.camNumList.append(camNum)
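                # The number of views is recovered from the visual hull file name, e.g.
                # a shape folder containing "visualHullSubd_12.ply" yields camNum == 12.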
camFileName = osp.join(shape, 'cam%d.txt' % camNum )
with open(camFileName, 'r') as camIn:
camLines = camIn.readlines()
viewNum = int(camLines[0].strip() )
origins = []
lookats = []
ups = []
for n in range(0, viewNum ):
originStr = camLines[3*n+1 ].strip().split(' ')
lookatStr = camLines[3*n+2 ].strip().split(' ')
upStr = camLines[3*n+3 ].strip().split(' ')
origin = np.array([float(x) for x in originStr ])[np.newaxis, :]
lookat = np.array([float(x) for x in lookatStr ])[np.newaxis, :]
up = np.array([float(x) for x in upStr])[np.newaxis, :]
origins.append(origin.astype(np.float32 ) )
lookats.append(lookat.astype(np.float32 ) )
ups.append(up.astype(np.float32 ) )
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
self.originArr.append(origins )
self.lookatArr.append(lookats )
self.upArr.append(ups )
if isLoadEnvmap:
self.envList = []
self.scaleList = []
envListUnique = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
xmlFile = osp.join(shape, 'im.xml')
# Create rendering file for Depth maps
tree = et.parse(xmlFile )
root = tree.getroot()
shapes = root.findall('emitter')
assert(len(shapes ) == 1 )
for shape in shapes:
strings = shape.findall('string')
assert(len(strings) == 1 )
for st in strings:
envFileName = st.get('value')
envFileName = envFileName.replace('/home/zhl/CVPR20/TransparentShape', '../')
floats = shape.findall('float')
assert(len(floats) == 1 )
for f in floats:
scale = float(f.get('value') )
self.envList.append(envFileName )
self.scaleList.append(scale )
if envFileName not in envListUnique:
envListUnique.append(envFileName )
print("Number of environment maps %d" % (len(envListUnique ) ) )
self.imList = []
self.shapeNameList = []
self.camNumSingleList = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(dataRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
camNum = self.camNumList[n]
#imNames = sorted(glob.glob(osp.join(shape, 'im_*.rgbe' ) ) )
imNames = sorted(glob.glob(osp.join(shape, 'im_*.png' ) ) )
#random.shuffle(imNames )
if self.batchSize > 1:
if len(imNames ) < camNum:
print('%s: %d' % (shape, len(imNames) ) )
assert(False )
self.imList.append(imNames[0:camNum ] )
self.shapeNameList.append(os.path.basename(shape))
elif self.batchSize == 1:
for imName in imNames:
self.imList.append([imName])
self.camNumSingleList.append(camNum)
self.shapeNameList.append(os.path.basename(shape))
if rseed is not None:
random.seed(rseed)
# Permute the image list
self.count = len(self.imList)
self.perm = list(range(self.count ) )
if isRandom:
random.shuffle(self.perm)
def __len__(self):
return len(self.perm)
def __getitem__(self, ind):
# normalize the normal vector so that it will be unit length
#if self.batchSize < self.camNum:
if self.batchSize > 1:
camNum = self.camNumList[self.perm[ind ]]
elif self.batchSize == 1:
camNum = self.camNumSingleList[self.perm[ind ]]
shapeName = self.shapeNameList[self.perm[ind ]]
imNames = self.imList[self.perm[ind ] ]
if self.batchSize < camNum:
#random.shuffle(imNames )
if self.isOptim:
count = 0
imNamesNew = []
#for n in range(0, self.camNum ):
for n in range(0, camNum ):
isAdd = False
imName = imNames[n]
if camNum - n == self.batchSize - count:
isAdd = True
else:
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (camNum ) ).replace('.rgbe', '.npy')
if not osp.isfile(twoNormalName ):
isAdd = True
if isAdd == True:
imNamesNew.append(imName )
count += 1
if count == self.batchSize:
break
imNames = imNamesNew
else:
imNames = imNames[0:self.batchSize ]
segs = []
#seg2s = []
#normals = []
#normal2s = []
depths = []
depth2s = []
ims = []
imEs = []
origins = []
lookats = []
ups = []
envs = []
segVHs = []
seg2VHs = []
normalVHs = []
normal2VHs = []
depthVHs = []
depth2VHs = []
normalOpts = []
normal2Opts = []
imScale = None
for imName in imNames:
'''
twoBounceName = imName.replace('im_', 'imtwoBounce_').replace('.rgbe', '.npy')
if not osp.isfile(twoBounceName ):
twoBounceName = imName.replace('im_', 'imtwoBounce_').replace('.rgbe', '.h5')
hf = h5py.File(twoBounceName, 'r')
twoBounce = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoBounce = np.load(twoBounceName )
if twoBounce.shape[0] != self.imWidth or twoBounce.shape[1] != self.imHeight:
newTwoBounce1 = cv2.resize(twoBounce[:, :, 0:3], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce2 = cv2.resize(twoBounce[:, :, 3:6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce3 = cv2.resize(twoBounce[:, :, 6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce4 = cv2.resize(twoBounce[:, :, 7:10], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce5 = cv2.resize(twoBounce[:, :, 10:13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce6 = cv2.resize(twoBounce[:, :, 13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
twoBounce = np.concatenate((newTwoBounce1, newTwoBounce2, newTwoBounce3[:, :, np.newaxis],
newTwoBounce4, newTwoBounce5, newTwoBounce6[:, :, np.newaxis] ), axis=2)
normal = twoBounce[:, :, 0:3].transpose([2, 0, 1] )
normal = np.ascontiguousarray(normal )
seg = twoBounce[:, :, 6:7].transpose([2, 0, 1] ) > 0.9
seg = np.ascontiguousarray(seg.astype(np.float32) )
depth = twoBounce[:, :, 3:6].transpose([2, 0, 1] )
depth = np.ascontiguousarray(depth )
depth = depth * seg
normal2 = twoBounce[:, :, 7:10].transpose([2, 0, 1] )
normal2 = np.ascontiguousarray(normal2 )
seg2 = twoBounce[:, :, 13:14].transpose([2, 0, 1] ) > 0.9
seg2 = np.ascontiguousarray(seg2.astype(np.float32) )
depth2 = twoBounce[:, :, 10:13].transpose([2, 0, 1] )
depth2 = np.ascontiguousarray(depth2 )
depth2 = depth2 * seg
normal = normal / np.sqrt(np.maximum(np.sum(normal * normal, axis=0), 1e-10) )[np.newaxis, :]
normal = normal * seg
normal2 = normal2 / np.sqrt(np.maximum(np.sum(normal2 * normal2, axis=0), 1e-10) )[np.newaxis, :]
normal2 = normal2 * seg
'''
# Read rendered images
#imE, imScale = self.loadHDR(imName, imScale )
imE = self.loadImage(imName, False )
imE = imE[:,:,::-1]
seg = self.loadMask(imName.replace('im', 'seg')).astype(np.float32) / 255
seg = seg[:,:,::-1]
im = imE * seg
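            # For this real-capture loader the image is an 8-bit PNG mapped to [0, 1]
            # by loadImage, and the seg_*.png mask (typically values 0/255) becomes a
            # float mask in {0, 1} after the division by 255 above.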
imId = int(imName.split('/')[-1].split('.')[0].split('_')[-1] )
shapeId = int(imName.split('/')[-2].split('_')[-1] ) - self.shapeRs
segs.append(seg[np.newaxis, :] )
#seg2s.append(seg2[np.newaxis, :] )
#normals.append(normal[np.newaxis, :] )
#normal2s.append(normal2[np.newaxis, :] )
#depths.append(depth[np.newaxis, :] )
#depth2s.append(depth2[np.newaxis, :] )
ims.append(im[np.newaxis, :] )
imEs.append(imE[np.newaxis, :] )
# Load the rendering file
if self.isLoadCam:
origin = self.originArr[shapeId ][imId-1 ]
lookat = self.lookatArr[shapeId ][imId-1 ]
up = self.upArr[shapeId ][imId-1 ]
origins.append(origin[np.newaxis, :] )
lookats.append(lookat[np.newaxis, :] )
ups.append(up[np.newaxis, :] )
if self.isLoadEnvmap:
envFileName = self.envList[shapeId ]
#scale = self.scaleList[shapeId ]
env = cv2.cvtColor(cv2.imread(envFileName, -1), cv2.COLOR_BGRA2BGR)[:, :, ::-1]
#env = cv2.imread(envFileName, -1)[:, :, ::-1]
env = cv2.resize(env, (self.envWidth, self.envHeight ), interpolation=cv2.INTER_LINEAR)
env = np.ascontiguousarray(env )
env = env / 255
#env = env.transpose([2, 0, 1]) * imScale * scale
env = env.transpose([2, 0, 1]).astype(np.float32)
envs.append(env[np.newaxis, :] )
if self.isLoadVH:
#twoBounceVHName = imName.replace('im_', 'imVH_%dtwoBounce_' % self.camNum ).replace('.png', '.npy')
twoBounceVHName = imName.replace('im_', 'imVH_twoBounce_' ).replace('.png', '.npy')
if not osp.isfile(twoBounceVHName ):
twoBounceVHName = imName.replace('im_', 'imVH_twoBounce_' ).replace('.png', '.h5')
hf = h5py.File(twoBounceVHName, 'r')
twoBounceVH = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoBounceVH = np.load(twoBounceVHName )
if twoBounceVH.shape[0] != self.imWidth or twoBounceVH.shape[1] != self.imHeight:
newTwoBounce1 = cv2.resize(twoBounceVH[:, :, 0:3], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce2 = cv2.resize(twoBounceVH[:, :, 3:6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce3 = cv2.resize(twoBounceVH[:, :, 6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce4 = cv2.resize(twoBounceVH[:, :, 7:10], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce5 = cv2.resize(twoBounceVH[:, :, 10:13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce6 = cv2.resize(twoBounceVH[:, :, 13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
twoBounceVH = np.concatenate((newTwoBounce1, newTwoBounce2, newTwoBounce3[:, :, np.newaxis],
newTwoBounce4, newTwoBounce5, newTwoBounce6[:, :, np.newaxis] ), axis=2)
normalVH = twoBounceVH[:, :, 0:3].transpose([2, 0, 1])
normalVH = np.ascontiguousarray(normalVH )
segVH = twoBounceVH[:, :, 6:7].transpose([2, 0, 1] ) > 0.9
segVH = np.ascontiguousarray(segVH.astype(np.float32) )
depthVH = twoBounceVH[:, :, 3:6].transpose([2, 0, 1])
depthVH = np.ascontiguousarray(depthVH )
depthVH = depthVH * segVH
normal2VH = twoBounceVH[:, :, 7:10].transpose([2, 0, 1])
normal2VH = np.ascontiguousarray(normal2VH )
seg2VH = twoBounceVH[:, :, 13:14].transpose([2, 0, 1] ) > 0.9
seg2VH = np.ascontiguousarray(seg2VH.astype(np.float32) )
depth2VH = twoBounceVH[:, :, 10:13].transpose([2, 0, 1])
depth2VH = np.ascontiguousarray(depth2VH )
depth2VH = depth2VH * segVH
normalVH = normalVH / np.sqrt(np.maximum(np.sum(normalVH * normalVH, axis=0), 1e-10) )[np.newaxis, :]
normalVH = normalVH * segVH
normal2VH = normal2VH / np.sqrt(np.maximum(np.sum(normal2VH * normal2VH, axis=0), 1e-10) )[np.newaxis, :]
normal2VH = normal2VH * segVH
segVHs.append(segVH[np.newaxis, :] )
seg2VHs.append(seg2VH[np.newaxis, :] )
normalVHs.append(normalVH[np.newaxis, :] )
normal2VHs.append(normal2VH[np.newaxis, :] )
depthVHs.append(depthVH[np.newaxis, :] )
depth2VHs.append(depth2VH[np.newaxis, :] )
if self.isLoadOptim:
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (camNum ) ).replace('.rgbe', '.npy')
if not osp.isfile(twoNormalName ):
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (camNum ) ).replace('.rgbe', '.h5')
hf = h5py.File(twoNormalName, 'r')
twoNormals = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoNormals = np.load(twoNormalName )
normalOpt, normal2Opt = twoNormals[:, :, 0:3], twoNormals[:, :, 3:6]
normalOpt = cv2.resize(normalOpt, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
normal2Opt = cv2.resize(normal2Opt, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
normalOpt = np.ascontiguousarray(normalOpt.transpose([2, 0, 1] ) )
normal2Opt = np.ascontiguousarray(normal2Opt.transpose([2, 0, 1] ) )
normalOpt = normalOpt / np.sqrt(np.maximum(np.sum(normalOpt * normalOpt, axis=0), 1e-10) )[np.newaxis, :]
normalOpt = normalOpt * seg
normal2Opt = normal2Opt / np.sqrt(np.maximum(np.sum(normal2Opt * normal2Opt, axis=0), 1e-10) )[np.newaxis, :]
normal2Opt = normal2Opt * seg
normalOpts.append(normalOpt[np.newaxis, :] )
normal2Opts.append(normal2Opt[np.newaxis, :] )
segs = np.concatenate(segs, axis=0 )
#seg2s = np.concatenate(seg2s, axis=0 )
#normals = np.concatenate(normals, axis=0 )
#normal2s = np.concatenate(normal2s, axis=0 )
#depths = np.concatenate(depths, axis=0 )
#depth2s = np.concatenate(depth2s, axis=0 )
ims = np.concatenate(ims, axis=0 )
imEs = np.concatenate(imEs, axis=0 )
batchDict = {
'seg1': segs,
#'seg2': seg2s,
#'normal1': normals,
#'normal2': normal2s,
#'depth1': depths,
#'depth2': depth2s,
'im': ims,
'imE': imEs,
'name': imNames,
'camNum': camNum,
'shapeName': shapeName}
if self.isLoadCam:
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
batchDict['origin'] = origins
batchDict['lookat'] = lookats
batchDict['up'] = ups
if self.isLoadEnvmap:
envs = np.concatenate(envs, axis=0 )
batchDict['env'] = envs
if self.isLoadVH:
segVHs = np.concatenate(segVHs, axis=0 )
seg2VHs = np.concatenate(seg2VHs, axis=0 )
normalVHs = np.concatenate(normalVHs, axis=0 )
normal2VHs = np.concatenate(normal2VHs, axis=0 )
depthVHs = np.concatenate(depthVHs, axis=0 )
depth2VHs = np.concatenate(depth2VHs, axis=0 )
batchDict['seg1VH'] = segVHs
batchDict['seg2VH'] = seg2VHs
batchDict['normal1VH'] = normalVHs
batchDict['normal2VH'] = normal2VHs
batchDict['depth1VH'] = depthVHs
batchDict['depth2VH'] = depth2VHs
if self.isLoadOptim:
normalOpts = np.concatenate(normalOpts, axis=0 )
normal2Opts = np.concatenate(normal2Opts, axis=0 )
batchDict['normalOpt'] = normalOpts
batchDict['normal2Opt'] = normal2Opts
        # Load the SDF file
if self.isLoadSDF:
imName = imNames[0]
shapeId = imName.split('/')[-2]
shapePath = osp.join(self.shapeRoot, shapeId)
sdfName = osp.join(shapePath, 'visualHullSubd_%d_%d_sdf.npy' % (camNum, self.grid_res))
batchDict['shape_path'] = shapePath
if osp.isfile(sdfName):
                batchDict['grid'] = np.load(sdfName).astype(np.float64)
else:
VHName = osp.join(shapePath, 'visualHullSubd_%d.ply' % camNum)
mesh = trimesh.load(VHName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, self.grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z), axis=3)
query_points = coords.reshape((-1, 3))
sdfs = mesh_to_sdf(mesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=11)
sdfs = np.reshape(sdfs, grid_x.shape).transpose((1, 0, 2))
batchDict['grid'] = sdfs
np.save(sdfName, sdfs)
gt_sdfName = osp.join(shapePath, 'object_sdf_%d.npy' % (self.grid_res))
if osp.isfile(gt_sdfName):
                batchDict['gt_grid'] = np.load(gt_sdfName).astype(np.float64)
else:
gtName = osp.join(shapePath, 'meshGT_transform.ply')
gtmesh = trimesh.load(gtName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, self.grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z), axis=3)
query_points = coords.reshape((-1, 3))
gtsdfs = mesh_to_sdf(gtmesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=11)
gtsdfs = np.reshape(gtsdfs, grid_x.shape).transpose((1, 0, 2))
batchDict['gt_grid'] = gtsdfs
np.save(gt_sdfName, gtsdfs)
return batchDict
def loadHDR(self, imName, scale):
if not osp.isfile(imName ):
print('Error: %s does not exist.' % imName )
assert(False )
image = cv2.imread(imName, -1 )[:, :, ::-1]
image = cv2.resize(image, (self.imWidth, self.imHeight ), interpolation=cv2.INTER_LINEAR)
image = np.ascontiguousarray(image )
imMean = np.mean(image )
if scale is None:
if self.phase == 'TRAIN':
scale = (np.random.random() * 0.2 + 0.4) / imMean
else:
scale = 0.5 / imMean
image = np.clip( (image*scale), 0, 1).transpose([2, 0, 1] )
return image, scale
def loadMask(self, imName):
if not osp.isfile(imName ):
print('Error: %s does not exist.' % imName )
assert(False )
image = cv2.imread(imName, -1 )
image = cv2.resize(image, (self.imWidth, self.imHeight ), interpolation=cv2.INTER_LINEAR)
image = np.ascontiguousarray(image )[np.newaxis, :, :]
return image
def loadImage(self, imName, isGama = False):
if not os.path.isfile(imName):
print('Fail to load {0}'.format(imName) )
im = np.zeros([3, self.imSize, self.imSize], dtype=np.float32)
assert(False)
return im
im = Image.open(imName)
im = self.imResize(im)
im = np.asarray(im, dtype=np.float32)
if isGama:
im = (im / 255.0) ** 2.2
#im = 2 * im - 1
else:
#im = (im - 127.5) / 127.5
im = (im - 0) / 255
if len(im.shape) == 2:
im = im[:, np.newaxis]
im = np.transpose(im, [2, 0, 1])
return im
def imResize(self, im):
w0, h0 = im.size
if w0 != self.imHeight or h0 != self.imWidth:
            im = im.resize( (self.imWidth, self.imHeight ), Image.LANCZOS)
return im
#-------------------------------------------------------------------------------------------------------------
# Dataset of real objects placed in a virtual environment; this batch loader only reads the cameras, the environment map, and the GT mesh
class BatchLoaderMyreal(Dataset):
def __init__(self, dataRoot, shapeRoot = None,
imHeight = 360, imWidth = 480,
envHeight = 256, envWidth = 512,
isRandom=False, phase='TRAIN', rseed = 1,
isLoadVH = False, isLoadEnvmap = False,
isLoadCam = False, isLoadOptim = False,
camNum = 10, shapeRs = 0, shapeRe = 1500, volumeSize=32, batchSize = None, isOptim = False, ignore = [],
isLoadSDF = True, grid_res = 8, bounding_radius = 1.1):
self.dataRoot = dataRoot
self.shapeRoot = shapeRoot
self.imHeight = imHeight
self.imWidth = imWidth
self.envHeight = envHeight
self.envWidth = envWidth
self.phase = phase.upper()
self.isLoadVH = isLoadVH
self.isLoadCam = isLoadCam
self.isLoadEnvmap = isLoadEnvmap
self.camNum = camNum
self.shapeRs = shapeRs
self.shapeRe = shapeRe
self.isLoadSDF = isLoadSDF
self.grid_res = grid_res
self.bounding_radius = bounding_radius
if batchSize is None:
batchSize = camNum
self.batchSize = min(batchSize , 10)
else:
self.batchSize = batchSize
self.minX, self.maxX = -bounding_radius, bounding_radius
self.minY, self.maxY = -bounding_radius, bounding_radius
self.minZ, self.maxZ = -bounding_radius, bounding_radius
self.volumeSize = volumeSize
y, x, z = np.meshgrid(
np.linspace(self.minX, self.maxX, volumeSize ),
np.linspace(self.minY, self.maxY, volumeSize ),
np.linspace(self.minZ, self.maxZ, volumeSize ) )
x = x[:, :, :, np.newaxis ]
y = y[:, :, :, np.newaxis ]
z = z[:, :, :, np.newaxis ]
coord = np.concatenate([x, y, z], axis=3 )
shapeList = sorted(glob.glob(osp.join(dataRoot, 'Shape__*') ))
if isLoadCam:
self.originArr = []
self.lookatArr = []
self.upArr = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
camFileName = osp.join(shape, 'cam%d.txt' % camNum )
with open(camFileName, 'r') as camIn:
camLines = camIn.readlines()
viewNum = int(camLines[0].strip() )
origins = []
lookats = []
ups = []
for n in range(0, viewNum ):
originStr = camLines[3*n+1 ].strip().split(' ')
lookatStr = camLines[3*n+2 ].strip().split(' ')
upStr = camLines[3*n+3 ].strip().split(' ')
origin = np.array([float(x) for x in originStr ])[np.newaxis, :]
lookat = np.array([float(x) for x in lookatStr ])[np.newaxis, :]
up = np.array([float(x) for x in upStr])[np.newaxis, :]
origins.append(origin.astype(np.float32 ) )
lookats.append(lookat.astype(np.float32 ) )
ups.append(up.astype(np.float32 ) )
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
self.originArr.append(origins )
self.lookatArr.append(lookats )
self.upArr.append(ups )
if isLoadEnvmap:
self.envList = []
self.scaleList = []
envListUnique = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
xmlFile = osp.join(shape, 'im.xml')
# Create rendering file for Depth maps
tree = et.parse(xmlFile )
root = tree.getroot()
shapes = root.findall('emitter')
assert(len(shapes ) == 1 )
for shape in shapes:
strings = shape.findall('string')
assert(len(strings) == 1 )
for st in strings:
envFileName = st.get('value')
envFileName = envFileName.replace('/home/zhl/CVPR20/TransparentShape','../')
if not osp.isfile(envFileName):
print(shapeList[n])
if not envFileName.find('1640')==-1:
print(shapeList[n])
floats = shape.findall('float')
assert(len(floats) == 1 )
for f in floats:
scale = float(f.get('value') )
self.envList.append(envFileName )
self.scaleList.append(scale )
if envFileName not in envListUnique:
envListUnique.append(envFileName )
print("Number of environment maps %d" % (len(envListUnique ) ) )
if rseed is not None:
random.seed(rseed)
# Permute the image list
self.count = camNum
self.perm = list(range(self.count ) )
if isRandom:
random.shuffle(self.perm)
def __len__(self):
return len(self.perm)
def __getitem__(self, ind):
        # gather the per-view cameras and the shared environment map for this shape
origins = []
lookats = []
ups = []
envs = []
imScale = None
shapeList = glob.glob(osp.join(self.dataRoot, 'Shape__*'))
shapeId = ind
batchDict = {}
batchDict['data_path'] = osp.join(self.dataRoot, "Shape__%d" % (shapeId + self.shapeRs))
for imId in self.perm:
if self.isLoadCam:
origin = self.originArr[shapeId ][imId ]
lookat = self.lookatArr[shapeId ][imId ]
up = self.upArr[shapeId ][imId ]
origins.append(origin[np.newaxis, :] )
lookats.append(lookat[np.newaxis, :] )
ups.append(up[np.newaxis, :] )
if self.isLoadEnvmap:
envFileName = self.envList[shapeId ]
scale = self.scaleList[shapeId ]
env = cv2.imread(envFileName, -1)
if env is None:
print(envFileName)
env = env[:, :, ::-1]
env = cv2.resize(env, (self.envWidth, self.envHeight ), interpolation=cv2.INTER_LINEAR)
env = np.ascontiguousarray(env )
env = env.transpose([2, 0, 1]) * scale
envs.append(env[np.newaxis, :] )
if self.isLoadCam:
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
batchDict['origin'] = origins
batchDict['lookat'] = lookats
batchDict['up'] = ups
if self.isLoadEnvmap:
envs = np.concatenate(envs, axis=0 )
batchDict['env'] = envs
        # Read the SDF file
if self.isLoadSDF:
shapePath = osp.join(self.shapeRoot, "Shape__%d" % (shapeId + self.shapeRs))
sdfName = osp.join(shapePath, 'visualHullSubd_%d_%d_sdf.npy' % (self.camNum,self.grid_res))
batchDict['shape_path'] = shapePath
gt_sdfName = osp.join(shapePath, 'object_sdf_%d.npy'%(self.grid_res))
if osp.isfile(gt_sdfName):
batchDict['gt_grid'] = np.load(gt_sdfName).astype(np.float)
else:
#gtName = osp.join(shapePath, 'meshGT_transform.ply')
gtName = osp.join(shapePath, 'object-1500000.obj')
gtmesh = trimesh.load(gtName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, self.grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z), axis=3)
query_points = coords.reshape((-1, 3))
gtsdfs = mesh_to_sdf(gtmesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=20)
gtsdfs = np.reshape(gtsdfs, grid_x.shape).transpose((1, 0, 2))
batchDict['gt_grid'] = gtsdfs
np.save(gt_sdfName, gtsdfs)
return batchDict
def loadHDR(self, imName, scale):
if not osp.isfile(imName ):
print('Error: %s does not exist.' % imName )
assert(False )
image = cv2.imread(imName, -1 )[:, :, ::-1]
image = cv2.resize(image, (self.imWidth, self.imHeight ), interpolation=cv2.INTER_LINEAR)
image = np.ascontiguousarray(image )
imMean = np.mean(image )
if scale is None:
if self.phase == 'TRAIN':
scale = (np.random.random() * 0.2 + 0.4) / imMean
else:
scale = 0.5 / imMean
image = (image*scale).transpose([2, 0, 1] )
#image = np.clip((image * scale), 0, 1).transpose([2, 0, 1])
return image, scale
def loadImage(self, imName, isGama = False):
if not os.path.isfile(imName):
print('Fail to load {0}'.format(imName) )
            im = np.zeros([3, self.imHeight, self.imWidth], dtype=np.float32)
return im
im = Image.open(imName)
im = self.imResize(im)
im = np.asarray(im, dtype=np.float32)
if isGama:
im = (im / 255.0) ** 2.2
im = 2 * im - 1
else:
im = (im - 127.5) / 127.5
if len(im.shape) == 2:
im = im[:, np.newaxis]
im = np.transpose(im, [2, 0, 1])
return im
def imResize(self, im):
w0, h0 = im.size
if w0 != self.imHeight or h0 != self.imWidth:
im = im.resize( (self.imWidth, self.imHeight ), Image.ANTIALIAS)
return im
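# Illustrative helper (not part of the original file): the SDF-grid caching
# pattern that __getitem__ above implements inline, factored out as a sketch.
# It reuses the module's existing numpy / trimesh / mesh_to_sdf imports; the
# mesh_path and out_path arguments are placeholders.
def _compute_sdf_grid_sketch(mesh_path, out_path, grid_res=8, bounding_radius=1.1):
    mesh = trimesh.load(mesh_path)
    linear_space = np.linspace(-bounding_radius, bounding_radius, grid_res)
    grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
    query_points = np.stack((grid_x, grid_y, grid_z), axis=3).reshape((-1, 3))
    sdfs = mesh_to_sdf(mesh, query_points,
                       surface_point_method='sample', sign_method='normal')
    # transpose back to the (y, x, z) ordering used by the loader above
    sdfs = np.reshape(sdfs, grid_x.shape).transpose((1, 0, 2))
    np.save(out_path, sdfs)
    return sdfs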
#-----------------------------------------------------------------------------------------------------------
# Self-built dataset of real models in a virtual environment
class BatchLoaderMyReal(Dataset):
def __init__(self, dataRoot, shapeRoot = None,
imHeight = 360, imWidth = 480,
envHeight = 256, envWidth = 512,
isRandom=False, phase='TRAIN', rseed = 1,
isLoadVH = False, isLoadEnvmap = False,
isLoadCam = False, isLoadOptim = False,
camNum = 10, shapeRs = 0, shapeRe = 1500, volumeSize=32, batchSize = None, isOptim = False, ignore = [],
isLoadSDF = True, grid_res = 8, bounding_radius = 1.1):
self.dataRoot = dataRoot
self.shapeRoot = shapeRoot
self.imHeight = imHeight
self.imWidth = imWidth
self.envHeight = envHeight
self.envWidth = envWidth
self.phase = phase.upper()
self.isLoadVH = isLoadVH
self.isLoadCam = isLoadCam
self.isLoadEnvmap = isLoadEnvmap
self.isLoadOptim = isLoadOptim
self.camNum = camNum
self.shapeRs = shapeRs
self.shapeRe = shapeRe
self.isOptim = isOptim
self.isLoadSDF = isLoadSDF
self.grid_res = grid_res
self.bounding_radius = bounding_radius
if batchSize is None:
batchSize = camNum
self.batchSize = min(batchSize , 10)
else:
self.batchSize = batchSize
self.minX, self.maxX = -1.1, 1.1
self.minY, self.maxY = -1.1, 1.1
self.minZ, self.maxZ = -1.1, 1.1
self.volumeSize = volumeSize
y, x, z = np.meshgrid(
np.linspace(self.minX, self.maxX, volumeSize ),
np.linspace(self.minY, self.maxY, volumeSize ),
np.linspace(self.minZ, self.maxZ, volumeSize ) )
x = x[:, :, :, np.newaxis ]
y = y[:, :, :, np.newaxis ]
z = z[:, :, :, np.newaxis ]
coord = np.concatenate([x, y, z], axis=3 )
shapeList = glob.glob(osp.join(dataRoot, 'Shape__*') )
if isLoadCam:
self.originArr = []
self.lookatArr = []
self.upArr = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
camFileName = osp.join(shape, 'cam%d.txt' % camNum )
with open(camFileName, 'r') as camIn:
camLines = camIn.readlines()
viewNum = int(camLines[0].strip() )
origins = []
lookats = []
ups = []
for n in range(0, viewNum ):
originStr = camLines[3*n+1 ].strip().split(' ')
lookatStr = camLines[3*n+2 ].strip().split(' ')
upStr = camLines[3*n+3 ].strip().split(' ')
origin = np.array([float(x) for x in originStr ])[np.newaxis, :]
lookat = np.array([float(x) for x in lookatStr ])[np.newaxis, :]
up = np.array([float(x) for x in upStr])[np.newaxis, :]
origins.append(origin.astype(np.float32 ) )
lookats.append(lookat.astype(np.float32 ) )
ups.append(up.astype(np.float32 ) )
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
self.originArr.append(origins )
self.lookatArr.append(lookats )
self.upArr.append(ups )
if isLoadEnvmap:
self.envList = []
self.scaleList = []
envListUnique = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(shapeRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
xmlFile = osp.join(shape, 'im.xml')
# Create rendering file for Depth maps
tree = et.parse(xmlFile )
root = tree.getroot()
shapes = root.findall('emitter')
assert(len(shapes ) == 1 )
for shape in shapes:
strings = shape.findall('string')
assert(len(strings) == 1 )
for st in strings:
envFileName = st.get('value')
envFileName = envFileName.replace('/home/zhl/CVPR20/TransparentShape','../')
if not osp.isfile(envFileName):
print(shapeList[n])
if not envFileName.find('1640')==-1:
print(shapeList[n])
floats = shape.findall('float')
assert(len(floats) == 1 )
for f in floats:
scale = float(f.get('value') )
self.envList.append(envFileName )
self.scaleList.append(scale )
if envFileName not in envListUnique:
envListUnique.append(envFileName )
print("Number of environment maps %d" % (len(envListUnique ) ) )
self.imList = []
for n in range(max(0, shapeRs ), min(len(shapeList ), shapeRe ) ):
if n in ignore:
continue
shape = osp.join(dataRoot, 'Shape__%d' % n )
if not osp.isdir(shape ):
continue
imNames = sorted(glob.glob(osp.join(shape, 'im_*.npy' ) ) )
if isRandom:
random.shuffle(imNames )
if len(imNames ) < camNum:
print('%s: %d' % (shape, len(imNames) ) )
assert(False )
self.imList.append(imNames[0:camNum ] )
if rseed is not None:
random.seed(rseed)
# Permute the image list
self.count = len(self.imList)
self.perm = list(range(self.count ) )
if isRandom:
random.shuffle(self.perm)
def __len__(self):
return len(self.perm)
def __getitem__(self, ind):
# normalize the normal vector so that it will be unit length
imNames = self.imList[self.perm[ind ] ]
if self.batchSize < self.camNum:
random.shuffle(imNames )
            # When batchSize is camNum - 1, the last image is replaced with the normal-vector one (camNum)
if self.isOptim:
count = 0
imNamesNew = []
for n in range(0, self.camNum ):
isAdd = False
imName = imNames[n]
if n == self.camNum-2:
isAdd = True
else:
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) )
if not osp.isfile(twoNormalName ):
isAdd = True
if isAdd == True:
imNamesNew.append(imName )
count += 1
if count == self.batchSize:
break
imNames = imNamesNew
else:
imNames = imNames[0:self.batchSize ]
segs = []
seg2s = []
normals = []
normal2s = []
depths = []
depth2s = []
ims = []
imEs = []
origins = []
lookats = []
ups = []
envs = []
segVHs = []
seg2VHs = []
normalVHs = []
normal2VHs = []
depthVHs = []
depth2VHs = []
normalOpts = []
normal2Opts = []
imScale = None
for imName in imNames:
twoBounceName = imName.replace('im_', 'imtwoBounce_')
if not osp.isfile(twoBounceName ):
twoBounceName = imName.replace('im_', 'imtwoBounce_').replace('.npy', '.h5')
hf = h5py.File(twoBounceName, 'r')
twoBounce = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoBounce = np.load(twoBounceName )
if twoBounce.shape[0] != self.imWidth or twoBounce.shape[1] != self.imHeight:
newTwoBounce1 = cv2.resize(twoBounce[:, :, 0:3], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce2 = cv2.resize(twoBounce[:, :, 3:6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce3 = cv2.resize(twoBounce[:, :, 6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce4 = cv2.resize(twoBounce[:, :, 7:10], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce5 = cv2.resize(twoBounce[:, :, 10:13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce6 = cv2.resize(twoBounce[:, :, 13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
twoBounce = np.concatenate((newTwoBounce1, newTwoBounce2, newTwoBounce3[:, :, np.newaxis],
newTwoBounce4, newTwoBounce5, newTwoBounce6[:, :, np.newaxis] ), axis=2)
normal = twoBounce[:, :, 0:3].transpose([2, 0, 1] )
normal = np.ascontiguousarray(normal )
seg = twoBounce[:, :, 6:7].transpose([2, 0, 1] ) > 0.9
seg = np.ascontiguousarray(seg.astype(np.float32) )
#seg = seg[:,:,::-1]
depth = twoBounce[:, :, 3:6].transpose([2, 0, 1] )
depth = np.ascontiguousarray(depth )
depth = depth * seg
normal2 = twoBounce[:, :, 7:10].transpose([2, 0, 1] )
normal2 = np.ascontiguousarray(normal2 )
seg2 = twoBounce[:, :, 13:14].transpose([2, 0, 1] ) > 0.9
seg2 = np.ascontiguousarray(seg2.astype(np.float32) )
depth2 = twoBounce[:, :, 10:13].transpose([2, 0, 1] )
depth2 = np.ascontiguousarray(depth2 )
depth2 = depth2 * seg
normal = normal / np.sqrt(np.maximum(np.sum(normal * normal, axis=0), 1e-10) )[np.newaxis, :]
normal = normal * seg
normal2 = normal2 / np.sqrt(np.maximum(np.sum(normal2 * normal2, axis=0), 1e-10) )[np.newaxis, :]
normal2 = normal2 * seg
            # Read rendered images (pixel values compressed to the 0~1 range)
imE, imScale = self.loadHDR(imName, imScale )
#imE = imE[:,:,::-1]
im = imE * seg
imId = int(imName.split('/')[-1].split('.')[0].split('_')[-1] )
shapeId = int(imName.split('/')[-2].split('_')[-1] ) - self.shapeRs
segs.append(seg[np.newaxis, :] )
seg2s.append(seg2[np.newaxis, :] )
normals.append(normal[np.newaxis, :] )
normal2s.append(normal2[np.newaxis, :] )
depths.append(depth[np.newaxis, :] )
depth2s.append(depth2[np.newaxis, :] )
ims.append(im[np.newaxis, :] )
imEs.append(imE[np.newaxis, :] )
# Load the rendering file
if self.isLoadCam:
origin = self.originArr[shapeId ][imId-1 ]
lookat = self.lookatArr[shapeId ][imId-1 ]
up = self.upArr[shapeId ][imId-1 ]
origins.append(origin[np.newaxis, :] )
lookats.append(lookat[np.newaxis, :] )
ups.append(up[np.newaxis, :] )
if self.isLoadEnvmap:
envFileName = self.envList[shapeId ]
scale = self.scaleList[shapeId ]
env = cv2.imread(envFileName, -1)
if env is None:
print(envFileName)
env = env[:, :, ::-1]
env = cv2.resize(env, (self.envWidth, self.envHeight ), interpolation=cv2.INTER_LINEAR)
env = np.ascontiguousarray(env )
env = env.transpose([2, 0, 1]) * imScale * scale
envs.append(env[np.newaxis, :] )
if self.isLoadVH:
twoBounceVHName = imName.replace('im_', 'imVH_%dtwoBounce_' % self.camNum )
if not osp.isfile(twoBounceVHName ):
twoBounceVHName = imName.replace('im_', 'imVH_%dtwoBounce_' % self.camNum ).replace('.npy', '.h5')
hf = h5py.File(twoBounceVHName, 'r')
twoBounceVH = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoBounceVH = np.load(twoBounceVHName )
if twoBounceVH.shape[0] != self.imWidth or twoBounceVH.shape[1] != self.imHeight:
newTwoBounce1 = cv2.resize(twoBounceVH[:, :, 0:3], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce2 = cv2.resize(twoBounceVH[:, :, 3:6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce3 = cv2.resize(twoBounceVH[:, :, 6], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce4 = cv2.resize(twoBounceVH[:, :, 7:10], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce5 = cv2.resize(twoBounceVH[:, :, 10:13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
newTwoBounce6 = cv2.resize(twoBounceVH[:, :, 13], (self.imWidth, self.imHeight ), interpolation=cv2.INTER_AREA )
twoBounceVH = np.concatenate((newTwoBounce1, newTwoBounce2, newTwoBounce3[:, :, np.newaxis],
newTwoBounce4, newTwoBounce5, newTwoBounce6[:, :, np.newaxis] ), axis=2)
normalVH = twoBounceVH[:, :, 0:3].transpose([2, 0, 1])
normalVH = np.ascontiguousarray(normalVH )
segVH = twoBounceVH[:, :, 6:7].transpose([2, 0, 1] ) > 0.9
segVH = np.ascontiguousarray(segVH.astype(np.float32) )
depthVH = twoBounceVH[:, :, 3:6].transpose([2, 0, 1])
depthVH = np.ascontiguousarray(depthVH )
depthVH = depthVH * segVH
normal2VH = twoBounceVH[:, :, 7:10].transpose([2, 0, 1])
normal2VH = np.ascontiguousarray(normal2VH )
seg2VH = twoBounceVH[:, :, 13:14].transpose([2, 0, 1] ) > 0.9
seg2VH = np.ascontiguousarray(seg2VH.astype(np.float32) )
depth2VH = twoBounceVH[:, :, 10:13].transpose([2, 0, 1])
depth2VH = np.ascontiguousarray(depth2VH )
depth2VH = depth2VH * segVH
normalVH = normalVH / np.sqrt(np.maximum(np.sum(normalVH * normalVH, axis=0), 1e-10) )[np.newaxis, :]
normalVH = normalVH * segVH
normal2VH = normal2VH / np.sqrt(np.maximum(np.sum(normal2VH * normal2VH, axis=0), 1e-10) )[np.newaxis, :]
normal2VH = normal2VH * segVH
segVHs.append(segVH[np.newaxis, :] )
seg2VHs.append(seg2VH[np.newaxis, :] )
normalVHs.append(normalVH[np.newaxis, :] )
normal2VHs.append(normal2VH[np.newaxis, :] )
depthVHs.append(depthVH[np.newaxis, :] )
depth2VHs.append(depth2VH[np.newaxis, :] )
if self.isLoadOptim:
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) )
if not osp.isfile(twoNormalName ):
twoNormalName = imName.replace('im_', 'imtwoNormalPred%d_' % (self.camNum ) ).replace('.npy', '.h5')
hf = h5py.File(twoNormalName, 'r')
twoNormals = np.array(hf.get('data'), dtype=np.float32 )
hf.close()
else:
twoNormals = np.load(twoNormalName )
normalOpt, normal2Opt = twoNormals[:, :, 0:3], twoNormals[:, :, 3:6]
normalOpt = cv2.resize(normalOpt, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
normal2Opt = cv2.resize(normal2Opt, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
normalOpt = np.ascontiguousarray(normalOpt.transpose([2, 0, 1] ) )
normal2Opt = np.ascontiguousarray(normal2Opt.transpose([2, 0, 1] ) )
normalOpt = normalOpt / np.sqrt(np.maximum(np.sum(normalOpt * normalOpt, axis=0), 1e-10) )[np.newaxis, :]
normalOpt = normalOpt * seg
normal2Opt = normal2Opt / np.sqrt(np.maximum(np.sum(normal2Opt * normal2Opt, axis=0), 1e-10) )[np.newaxis, :]
normal2Opt = normal2Opt * seg
normalOpts.append(normalOpt[np.newaxis, :] )
normal2Opts.append(normal2Opt[np.newaxis, :] )
segs = np.concatenate(segs, axis=0 )
seg2s = np.concatenate(seg2s, axis=0 )
normals = np.concatenate(normals, axis=0 )
normal2s = np.concatenate(normal2s, axis=0 )
depths = np.concatenate(depths, axis=0 )
depth2s = np.concatenate(depth2s, axis=0 )
ims = np.concatenate(ims, axis=0 )
imEs = np.concatenate(imEs, axis=0 )
batchDict = {'seg1': segs,
'seg2': seg2s,
'normal1': normals,
'normal2': normal2s,
'depth1': depths,
'depth2': depth2s,
'im': ims,
'imE': imEs,
'name': imNames }
if self.isLoadCam:
origins = np.concatenate(origins, axis=0 )
lookats = np.concatenate(lookats, axis=0 )
ups = np.concatenate(ups, axis=0 )
batchDict['origin'] = origins
batchDict['lookat'] = lookats
batchDict['up'] = ups
if self.isLoadEnvmap:
envs = np.concatenate(envs, axis=0 )
batchDict['env'] = envs
if self.isLoadVH:
segVHs = np.concatenate(segVHs, axis=0 )
seg2VHs = np.concatenate(seg2VHs, axis=0 )
normalVHs = np.concatenate(normalVHs, axis=0 )
normal2VHs = np.concatenate(normal2VHs, axis=0 )
depthVHs = np.concatenate(depthVHs, axis=0 )
depth2VHs = np.concatenate(depth2VHs, axis=0 )
batchDict['seg1VH'] = segVHs
batchDict['seg2VH'] = seg2VHs
batchDict['normal1VH'] = normalVHs
batchDict['normal2VH'] = normal2VHs
batchDict['depth1VH'] = depthVHs
batchDict['depth2VH'] = depth2VHs
if self.isLoadOptim:
normalOpts = np.concatenate(normalOpts, axis=0 )
normal2Opts = np.concatenate(normal2Opts, axis=0 )
batchDict['normalOpt'] = normalOpts
batchDict['normal2Opt'] = normal2Opts
        # Read the SDF file
if self.isLoadSDF:
imName = imNames[0]
shapeId = imName.split('/')[-2]
shapePath = osp.join(self.shapeRoot, shapeId)
sdfName = osp.join(shapePath, 'visualHullSubd_%d_%d_sdf.npy' % (self.camNum,self.grid_res))
batchDict['shape_path'] = shapePath
if osp.isfile(sdfName):
batchDict['grid'] = np.load(sdfName).astype(np.float)
else:
VHName = osp.join(shapePath, 'visualHullSubd_%d.obj' % self.camNum)
mesh = trimesh.load(VHName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, self.grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z),axis=3)
query_points = coords.reshape((-1,3))
sdfs = mesh_to_sdf(mesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=11)
sdfs = np.reshape(sdfs, grid_x.shape).transpose((1,0,2))
batchDict['grid'] = sdfs
np.save(sdfName,sdfs)
grid_ress = [self.grid_res]
for grid_res in grid_ress:
gt_sdfName = osp.join(shapePath, 'object_sdf_%d.npy'%(grid_res))
if osp.isfile(gt_sdfName):
batchDict['gt_grid'] = np.load(gt_sdfName).astype(np.float)
else:
#gtName = osp.join(shapePath, 'meshGT_transform.ply')
gtName = osp.join(shapePath, 'object-1500000.obj')
gtmesh = trimesh.load(gtName)
linear_space = np.linspace(-self.bounding_radius, self.bounding_radius, grid_res)
grid_x, grid_y, grid_z = np.meshgrid(linear_space, linear_space, linear_space)
coords = np.stack((grid_x, grid_y, grid_z), axis=3)
query_points = coords.reshape((-1, 3))
gtsdfs = mesh_to_sdf(gtmesh, query_points, surface_point_method='sample', sign_method='normal',
bounding_radius=None, scan_count=100,
scan_resolution=400, sample_point_count=10000000, normal_sample_count=20)
gtsdfs = np.reshape(gtsdfs, grid_x.shape).transpose((1, 0, 2))
batchDict['gt_grid'] = gtsdfs
np.save(gt_sdfName, gtsdfs)
return batchDict
def loadHDR(self, imName, scale):
if not osp.isfile(imName ):
print('Error: %s does not exist.' % imName )
assert(False )
image = np.load(imName)[:, :, :]
image = cv2.resize(image, (self.imWidth, self.imHeight ), interpolation=cv2.INTER_LINEAR)
image = np.ascontiguousarray(image )
imMean = np.mean(image )
if scale is None:
if self.phase == 'TRAIN':
scale = (np.random.random() * 0.2 + 0.4) / imMean
else:
scale = 0.5 / imMean
image = (image*scale).transpose([2, 0, 1] )
#image = np.clip((image * scale), 0, 1).transpose([2, 0, 1])
return image, scale
def loadImage(self, imName, isGama = False):
if not os.path.isfile(imName):
print('Fail to load {0}'.format(imName) )
            im = np.zeros([3, self.imHeight, self.imWidth], dtype=np.float32)
return im
im = Image.open(imName)
im = self.imResize(im)
im = np.asarray(im, dtype=np.float32)
if isGama:
im = (im / 255.0) ** 2.2
im = 2 * im - 1
else:
im = (im - 127.5) / 127.5
if len(im.shape) == 2:
im = im[:, np.newaxis]
im = np.transpose(im, [2, 0, 1])
return im
def imResize(self, im):
w0, h0 = im.size
if w0 != self.imHeight or h0 != self.imWidth:
im = im.resize( (self.imWidth, self.imHeight ), Image.ANTIALIAS)
return im
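# Illustrative usage sketch (not part of the original file): driving the loader
# above with a torch DataLoader. The paths and camNum below are placeholders,
# and isLoadSDF is turned off so the sketch does not require cached SDF grids.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = BatchLoaderMyReal(
        dataRoot='./Data/Images', shapeRoot='./Data/Shapes',
        camNum=10, isLoadCam=True, isLoadEnvmap=True,
        isLoadVH=False, isLoadSDF=False)
    loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    for batch in loader:
        # keys follow the batchDict built in __getitem__ above
        print(batch['im'].shape, batch['normal1'].shape, batch['origin'].shape)
        break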
| 44.166175
| 135
| 0.52163
|
26dce4bd40b3a53d382d21e3a778d76eca390d22
| 361
|
py
|
Python
|
leetcode/python/problems/reverseInt.py
|
tuvshinot/algorithm-sorting-DS
|
784c2338fb92f9d2f4da6294f242563031a09c4c
|
[
"MIT"
] | null | null | null |
leetcode/python/problems/reverseInt.py
|
tuvshinot/algorithm-sorting-DS
|
784c2338fb92f9d2f4da6294f242563031a09c4c
|
[
"MIT"
] | null | null | null |
leetcode/python/problems/reverseInt.py
|
tuvshinot/algorithm-sorting-DS
|
784c2338fb92f9d2f4da6294f242563031a09c4c
|
[
"MIT"
] | null | null | null |
def int_reverser(x : int) -> int:
""" Reversing int using pop push
algorithm run time 12ms"""
rev = 0
minus = 1
if x < 0:
minus = -1
x = x * minus
while x != 0:
pop = x % 10
x = int(x / 10)
rev = rev * 10 + pop
        if rev < -(2**31) or rev > 2**31 - 1:  # must fit in a signed 32-bit int
return 0
return rev * minus
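# Illustrative checks (not part of the original file) for the reverser above,
# including the signed 32-bit overflow guard.
if __name__ == "__main__":
    assert int_reverser(123) == 321
    assert int_reverser(-120) == -21
    assert int_reverser(1534236469) == 0  # reversed value 9646324351 overflows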
| 21.235294
| 39
| 0.445983
|
bbfa524b8e1b4c3bcded363b5a912c15ea7e0ba4
| 1,719
|
py
|
Python
|
fluent.syntax/tests/syntax/test_ast_json.py
|
shlomyb-di/python-fluent
|
284507d5aed60a2d4bc9b4433ff7fef121529d6f
|
[
"Apache-2.0"
] | 155
|
2017-02-15T11:39:45.000Z
|
2022-03-15T19:06:58.000Z
|
fluent.syntax/tests/syntax/test_ast_json.py
|
shlomyb-di/python-fluent
|
284507d5aed60a2d4bc9b4433ff7fef121529d6f
|
[
"Apache-2.0"
] | 113
|
2017-03-14T16:47:57.000Z
|
2022-02-03T20:53:07.000Z
|
fluent.syntax/tests/syntax/test_ast_json.py
|
shlomyb-di/python-fluent
|
284507d5aed60a2d4bc9b4433ff7fef121529d6f
|
[
"Apache-2.0"
] | 18
|
2017-02-08T01:22:51.000Z
|
2021-12-21T03:07:34.000Z
|
import unittest
from tests.syntax import dedent_ftl
from fluent.syntax.ast import from_json
from fluent.syntax.parser import FluentParser
class TestASTJSON(unittest.TestCase):
maxDiff = None
def setUp(self):
self.parser = FluentParser()
def test_simple_resource(self):
input = """\
foo = Foo
"""
ast1 = self.parser.parse(dedent_ftl(input))
json1 = ast1.to_json()
ast2 = from_json(json1)
json2 = ast2.to_json()
self.assertEqual(json1, json2)
def test_complex_resource(self):
input = """\
### A Resource comment
# A comment about shared-photos
shared-photos =
{ $user_name } { $photo_count ->
[0] hasn't added any photos yet
[one] added a new photo
*[other] added { $photo_count } new photos
}.
## A Section comment
// A Syntax 0.4 comment about liked-comment
liked-comment =
{ $user_name } liked your comment on { $user_gender ->
[male] his
[female] her
*[other] their
} post.
"""
ast1 = self.parser.parse(dedent_ftl(input))
json1 = ast1.to_json()
ast2 = from_json(json1)
json2 = ast2.to_json()
self.assertEqual(json1, json2)
def test_syntax_error(self):
input = """\
foo = Foo {
"""
ast1 = self.parser.parse(dedent_ftl(input))
json1 = ast1.to_json()
ast2 = from_json(json1)
json2 = ast2.to_json()
self.assertEqual(json1, json2)
| 25.279412
| 70
| 0.52356
|
51d31db20ab142cb1529bba3a929437ff7d50029
| 6,247
|
py
|
Python
|
src/protean/adapters/event_store/__init__.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
src/protean/adapters/event_store/__init__.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
src/protean/adapters/event_store/__init__.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
import importlib
import logging
from collections import defaultdict
from typing import List, Optional, Type
from protean import BaseEvent, BaseEventHandler
from protean.core.command import BaseCommand
from protean.core.command_handler import BaseCommandHandler
from protean.core.event_sourced_repository import (
BaseEventSourcedRepository,
event_sourced_repository_factory,
)
from protean.exceptions import ConfigurationError, NotSupportedError
from protean.utils import fqn
from protean.utils.mixins import Message
logger = logging.getLogger(__name__)
class EventStore:
def __init__(self, domain):
self.domain = domain
self._event_store = None
self._event_streams = None
self._command_streams = None
@property
def store(self):
if self._event_store is None:
self._initialize()
return self._event_store
def _initialize(self):
if not self._event_store:
logger.debug("Initializing Event Store...")
configured_event_store = self.domain.config["EVENT_STORE"]
if configured_event_store and isinstance(configured_event_store, dict):
event_store_full_path = configured_event_store["PROVIDER"]
event_store_module, event_store_class = event_store_full_path.rsplit(
".", maxsplit=1
)
event_store_cls = getattr(
importlib.import_module(event_store_module), event_store_class
)
store = event_store_cls(self.domain, configured_event_store)
else:
raise ConfigurationError(
"Configure at least one event store in the domain"
)
self._event_store = store
self._initialize_event_streams()
self._initialize_command_streams()
return self._event_store
def _initialize_event_streams(self):
self._event_streams = defaultdict(set)
for _, record in self.domain.registry.event_handlers.items():
stream_name = (
record.cls.meta_.stream_name
or record.cls.meta_.aggregate_cls.meta_.stream_name
)
self._event_streams[stream_name].add(record.cls)
def _initialize_command_streams(self):
self._command_streams = defaultdict(set)
for _, record in self.domain.registry.command_handlers.items():
self._command_streams[record.cls.meta_.aggregate_cls.meta_.stream_name].add(
record.cls
)
def repository_for(self, aggregate_cls):
if self._event_store is None:
self._initialize()
repository_cls = type(
aggregate_cls.__name__ + "Repository", (BaseEventSourcedRepository,), {}
)
repository_cls = event_sourced_repository_factory(
repository_cls, aggregate_cls=aggregate_cls
)
return repository_cls(self.domain)
def handlers_for(self, event: BaseEvent) -> List[BaseEventHandler]:
if self._event_streams is None:
self._initialize_event_streams()
all_stream_handlers = self._event_streams.get("$all", set())
stream_name = (
event.meta_.stream_name or event.meta_.aggregate_cls.meta_.stream_name
)
stream_handlers = self._event_streams.get(stream_name, set())
return set.union(stream_handlers, all_stream_handlers)
def command_handler_for(self, command: BaseCommand) -> Optional[BaseCommandHandler]:
if self._command_streams is None:
self._initialize_command_streams()
stream_name = command.meta_.stream_name or (
command.meta_.aggregate_cls.meta_.stream_name
if command.meta_.aggregate_cls
else None
)
if not stream_name:
return None
handler_classes = self._command_streams.get(stream_name, set())
# No command handlers have been configured to run this command
if len(handler_classes) == 0:
return None
# Ensure that a command has a unique handler across all handlers
# FIXME Perform this check on domain spin-up?
handler_methods = set()
for handler_cls in handler_classes:
try:
handler_method = next(
iter(handler_cls._handlers[fqn(command.__class__)])
)
handler_methods.add((handler_cls, handler_method))
except StopIteration:
pass
if len(handler_methods) > 1:
raise NotSupportedError(
f"Command {command.__class__.__name__} cannot be handled by multiple handlers"
)
return next(iter(handler_methods))[0] if handler_methods else None
def last_event_of_type(
self, event_cls: Type[BaseEvent], stream_name: str = None
) -> BaseEvent:
stream_name = stream_name or "$all"
events = [
event
for event in self.domain.event_store.store._read(stream_name)
if event["type"] == fqn(event_cls)
]
return Message.from_dict(events[-1]).to_object() if len(events) > 0 else None
def events_of_type(
self, event_cls: Type[BaseEvent], stream_name: str = None
) -> List[BaseEvent]:
"""Read events of a specific type in a given stream.
        This is a utility method, especially useful for testing purposes, that retrieves events of a
specific type from the event store.
If no stream is specified, events of the requested type will be retrieved from all streams.
:param event_cls: Class of the event type to be retrieved
:param stream_name: Stream from which events are to be retrieved
:type event_cls: BaseEvent Class
:type stream_name: String, optional, default is `None`
:return: A list of events of `event_cls` type
:rtype: list
"""
stream_name = stream_name or "$all"
return [
Message.from_dict(event).to_object()
for event in self.domain.event_store.store._read(stream_name)
if event["type"] == fqn(event_cls)
]
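# Illustrative helper (not part of the original module): how the two query
# methods above are typically combined. `domain` and `event_cls` are assumed
# to come from application code; nothing here runs at import time.
def _latest_and_history_sketch(domain, event_cls, stream_name=None):
    latest = domain.event_store.last_event_of_type(event_cls, stream_name)
    history = domain.event_store.events_of_type(event_cls, stream_name)
    return latest, history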
| 34.899441
| 99
| 0.643669
|
bf998f130a7f99fb546a0792635172e74ed0de07
| 1,876
|
py
|
Python
|
userbot/plugins/eval.py
|
sudo-akashi/SudoBot
|
7e82b2db0475182705b14e30f635a14ad1d0f482
|
[
"Apache-2.0"
] | 2
|
2020-07-26T02:48:25.000Z
|
2020-07-27T02:22:01.000Z
|
userbot/plugins/eval.py
|
sudo-akashi/SudoBot
|
7e82b2db0475182705b14e30f635a14ad1d0f482
|
[
"Apache-2.0"
] | null | null | null |
userbot/plugins/eval.py
|
sudo-akashi/SudoBot
|
7e82b2db0475182705b14e30f635a14ad1d0f482
|
[
"Apache-2.0"
] | 3
|
2020-07-25T18:16:43.000Z
|
2020-08-15T10:42:41.000Z
|
from telethon import events, errors, functions, types
import inspect
import traceback
import asyncio
import sys
import io
from uniborg.util import admin_cmd
@borg.on(admin_cmd("eval", allow_sudo=True))
@borg.on(admin_cmd("eval"))
async def _(event):
if event.fwd_from:
return
await event.edit("Processing ...")
cmd = event.text.split(" ", maxsplit=1)[1]
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
old_stderr = sys.stderr
old_stdout = sys.stdout
redirected_output = sys.stdout = io.StringIO()
redirected_error = sys.stderr = io.StringIO()
stdout, stderr, exc = None, None, None
try:
await aexec(cmd, event)
except Exception:
exc = traceback.format_exc()
stdout = redirected_output.getvalue()
stderr = redirected_error.getvalue()
sys.stdout = old_stdout
sys.stderr = old_stderr
evaluation = ""
if exc:
evaluation = exc
elif stderr:
evaluation = stderr
elif stdout:
evaluation = stdout
else:
evaluation = "Success"
final_output = "**EVAL**: `{}` \n\n **OUTPUT**: \n`{}` \n".format(cmd, evaluation)
if len(final_output) > Config.MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(final_output)) as out_file:
out_file.name = "eval.text"
await borg.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=cmd,
reply_to=reply_to_id
)
await event.delete()
else:
await event.edit(final_output)
async def aexec(code, event):
exec(
f'async def __aexec(event): ' +
''.join(f'\n {l}' for l in code.split('\n'))
)
return await locals()['__aexec'](event)
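# Illustrative sketch (not part of the original plugin): exercising the aexec
# wrapper above outside Telegram with a dummy event object, to show how a
# multi-line snippet is wrapped into an async function and awaited.
if __name__ == "__main__":
    class _DummyEvent:
        chat_id = 0
    snippet = "x = 6 * 7\nprint(x, event.chat_id)"
    asyncio.run(aexec(snippet, _DummyEvent()))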
| 26.422535
| 86
| 0.608742
|
a322c1f7004630d8ea582f9e23494de758ae980b
| 196
|
py
|
Python
|
oms_cms/backend/partners/apps.py
|
Hamel007/oms_cms
|
a120b27932fe1bd89f2c621c181b80b19caba0e0
|
[
"BSD-3-Clause"
] | 18
|
2019-07-11T18:34:10.000Z
|
2021-11-20T06:34:39.000Z
|
oms_cms/backend/partners/apps.py
|
Hamel007/oms_cms
|
a120b27932fe1bd89f2c621c181b80b19caba0e0
|
[
"BSD-3-Clause"
] | 13
|
2019-07-24T11:27:58.000Z
|
2022-03-28T01:07:31.000Z
|
oms_cms/backend/partners/apps.py
|
Hamel007/oms_cms
|
a120b27932fe1bd89f2c621c181b80b19caba0e0
|
[
"BSD-3-Clause"
] | 18
|
2019-07-08T18:07:21.000Z
|
2021-11-03T10:33:07.000Z
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class PartnersConfig(AppConfig):
name = 'oms_cms.backend.partners'
verbose_name = _('Партнеры')
| 24.5
| 55
| 0.77551
|
457c35de6823a9823cf350f8d0a31a354fc8da67
| 2,513
|
py
|
Python
|
Scripts/settings.py
|
muntasirraihan/timestamed-ycsb
|
56e3f23c0b5d6c19ba0c4ddfe228891f6cccec58
|
[
"Apache-2.0"
] | 5
|
2015-10-07T13:37:56.000Z
|
2019-11-26T10:01:27.000Z
|
Scripts/settings.py
|
muntasirraihan/PCAP
|
56e3f23c0b5d6c19ba0c4ddfe228891f6cccec58
|
[
"Apache-2.0"
] | null | null | null |
Scripts/settings.py
|
muntasirraihan/PCAP
|
56e3f23c0b5d6c19ba0c4ddfe228891f6cccec58
|
[
"Apache-2.0"
] | null | null | null |
import os
# clusters used for running cassandra, ycsb, and consistency computation
db_cluster = "cloud-test"
comp_cluster = "cluster-test"
# Objects used for determining host lists
all_hosts = range(0,0)
bad_hosts = []
hosts_prefix = "10.1.1."
# Directories
#home_dir = os.environ['HOME']
home_dir = "/proj/ISS"
base_dir = home_dir + "/conbench"
script_dir = base_dir + "/scripts"
output_dir = base_dir + "/scripts"
cassandra_source_dir = base_dir + "/apache-cassandra-1.2.4"
cassandra_target_dir = "/mnt/cassandra_" + os.getenv("USER")
#cassandra_data_dir = "/tmp/data" + os.getenv("USER")
cassandra_data_dir = "/mnt/data_" + os.getenv("USER")
#cassandra_data_dir = "/tmp/data"
ycsb_dir = base_dir + "/ycsb-0.1.4"
slf_dir = base_dir + "/slf4j-1.6.4"
cloning_dir = base_dir + "/cloning-1.8.1"
jmxterm_jar = base_dir + "/jmxterm-1.0-alpha-4-uber/jmxterm-1.0-alpha-4-uber.jar"
comp_dir = base_dir + "/consistency_analysis/ConsistencyAnalysis/ProbCAPComputation"
# path to ntpq command
ntpq_dir = home_dir + "/ntp/ntp-4.2.6p5/ntpq"
# directory for logging consistency data
clog_dir = output_dir + "/CLOG"
# directory for logging readdelay data
dlog_dir = output_dir + "/RDLOG"
# How many YCSB threads to run
YCSB_threads = 8
YCSB_threads_for_load = 8
# Number of records and number of seconds for YCSB load phase
num_seconds_to_load = 20 # zero for unlimited time
# Number of seconds for YCSB run phase
num_seconds_to_run = 60
# Target number of operations per second per host
target_thr_per_host = 10000 # zero for unlimited rate
# turn consistency instrumentation on
instrument = 1
instrument_PBS = 0
instrument_YCSBPP = 0
# storage settings
replication_factor = 3
# Experiment should stop after load phase
stop_after_load_phase = False
# Set this to True to skip copying storage system binaries
skip_db_copy = False
# consistency settings
read_consistency = "ONE"
write_consistency = "ONE"
read_delay = "7"
# service level agreement settings
# (1-PA) % of operations should complete before TA ms time
TA = 80
PA = .1
PC = 0
# for now, (1-PC) % of read operations that start at least TC time after a write should return the value of that write
TC = 6
#PC will be computed and adapted by the control system
TP = 0
# TP will be computed later, actually it is the average network delay
alpha = 0
read_delay_inc = 1 # temporary hack: the same variable controls both read delay and read repair rate
run_again = True
network_delay = False
error_tolerance = .01
read_repair_chance = 0.1
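# Illustrative sanity check (not part of the original file): derived quantities
# an experiment driver might print from the knobs above.
if __name__ == "__main__":
    sla_fraction = 1 - PA  # fraction of operations expected to finish within TA ms
    print("SLA: %.0f%% of operations within %d ms" % (sla_fraction * 100, TA))
    print("YCSB run: %d threads for %d s, target %d ops/s per host"
          % (YCSB_threads, num_seconds_to_run, target_thr_per_host))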
| 25.383838
| 118
| 0.74811
|
4491f156e030ff926c789f92f485a3f86fb6c07e
| 1,603
|
py
|
Python
|
simple-backend/nlpviewer_backend/handlers/user.py
|
aerinzhang/stave
|
5ffc8e3a914664f669f5f0d747f66fd2ed418da5
|
[
"Apache-2.0"
] | null | null | null |
simple-backend/nlpviewer_backend/handlers/user.py
|
aerinzhang/stave
|
5ffc8e3a914664f669f5f0d747f66fd2ed418da5
|
[
"Apache-2.0"
] | null | null | null |
simple-backend/nlpviewer_backend/handlers/user.py
|
aerinzhang/stave
|
5ffc8e3a914664f669f5f0d747f66fd2ed418da5
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from django.urls import include, path
from django.http import HttpResponse, JsonResponse
from django.forms import model_to_dict
import json
from ..models import User
from ..lib.require_login import require_login
@require_login
def listAll(request):
users = User.objects.all().values()
return JsonResponse(list(users), safe=False)
@require_login
def create(request):
received_json_data = json.loads(request.body)
user = User(
name=received_json_data.get('name'),
password=received_json_data.get('password')
)
user.save()
userJson = model_to_dict(user)
return JsonResponse(userJson, safe=False)
def signup(request):
received_json_data = json.loads(request.body)
user = User(
name=received_json_data.get('name'),
password=received_json_data.get('password')
)
user.save()
userJson = model_to_dict(user)
return JsonResponse(userJson, safe=False)
@require_login
def edit(request, user_id):
user = User.objects.get(pk=user_id)
received_json_data = json.loads(request.body)
user.name = received_json_data.get('name')
user.password = received_json_data.get('password')
user.save()
userJson = model_to_dict(user)
return JsonResponse(userJson, safe=False)
@require_login
def delete(request, user_id):
user = User.objects.get(pk=user_id)
user.delete()
return HttpResponse('ok')
@require_login
def query(request, user_id):
userJson = model_to_dict(
User.objects.get(pk=user_id))
return JsonResponse(userJson, safe=False)
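# Illustrative client sketch (not part of the original module): the JSON body
# the signup/create handlers above expect. The URL is an assumption -- the
# project's urls.py, which is not shown here, defines the real route.
def _signup_client_sketch(base_url="http://localhost:8000"):
    import requests
    payload = {"name": "alice", "password": "secret"}
    return requests.post(base_url + "/users/signup", data=json.dumps(payload))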
| 23.573529
| 54
| 0.717405
|
5a946acac2baa71ad3c46f101859e7c2c497ccd1
| 32,157
|
py
|
Python
|
disaggregator/build/pandas/pandas/tseries/tdi.py
|
pjkundert/wikienergy
|
ac3a13780bccb001c81d6f8ee27d3f5706cfa77e
|
[
"MIT"
] | 29
|
2015-01-08T19:20:37.000Z
|
2021-04-20T08:25:56.000Z
|
disaggregator/build/pandas/pandas/tseries/tdi.py
|
afcarl/wikienergy
|
ac3a13780bccb001c81d6f8ee27d3f5706cfa77e
|
[
"MIT"
] | null | null | null |
disaggregator/build/pandas/pandas/tseries/tdi.py
|
afcarl/wikienergy
|
ac3a13780bccb001c81d6f8ee27d3f5706cfa77e
|
[
"MIT"
] | 17
|
2015-02-01T18:12:04.000Z
|
2020-06-15T14:13:04.000Z
|
""" implement the TimedeltaIndex """
import operator
import datetime
from datetime import timedelta
import numpy as np
from pandas.core.common import (ABCSeries, _TD_DTYPE, _INT64_DTYPE,
is_timedelta64_dtype, _maybe_box,
_values_from_object, isnull)
from pandas.core.index import Index, Int64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.core.base import PandasObject
from pandas.util.decorators import cache_readonly
from pandas.tseries.frequencies import to_offset
import pandas.core.common as com
from pandas.tseries import timedeltas
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.tseries.timedeltas import to_timedelta, _coerce_scalar_to_timedelta_type
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import Tick, DateOffset
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.index as _index
Timedelta = tslib.Timedelta
_resolution_map = {
'ns' : offsets.Nano,
'us' : offsets.Micro,
'ms' : offsets.Milli,
's' : offsets.Second,
'm' : offsets.Minute,
'h' : offsets.Hour,
'D' : offsets.Day,
}
def _td_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert timedelta-like to timedelta64
"""
def wrapper(self, other):
func = getattr(super(TimedeltaIndex, self), opname)
if _is_convertible_to_td(other):
other = _to_m8(other)
result = func(other)
if com.isnull(other):
result.fill(nat_result)
else:
if not com.is_list_like(other):
raise TypeError("cannot compare a TimedeltaIndex with type {0}".format(type(other)))
other = TimedeltaIndex(other).values
result = func(other)
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == tslib.iNaT
else:
o_mask = other.view('i8') == tslib.iNaT
if o_mask.any():
result[o_mask] = nat_result
mask = self.asi8 == tslib.iNaT
if mask.any():
result[mask] = nat_result
# support of bool dtype indexers
if com.is_bool_dtype(result):
return result
return Index(result)
return wrapper
class TimedeltaIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
    unit : unit of the arg (D, h, m, s, ms, us, ns), optional
        denotes the unit when the data is passed as an integer/float number
freq: a frequency for the index, optional
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
name : object
Name to be stored in the index
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='m8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
_datetimelike_ops = ['days','hours','minutes','seconds','milliseconds','microseconds',
'nanoseconds','freq','components']
__eq__ = _td_index_cmp('__eq__')
__ne__ = _td_index_cmp('__ne__', nat_result=True)
__lt__ = _td_index_cmp('__lt__')
__gt__ = _td_index_cmp('__gt__')
__le__ = _td_index_cmp('__le__')
__ge__ = _td_index_cmp('__ge__')
_engine_type = _index.TimedeltaEngine
_comparables = ['name','freq']
_attributes = ['name','freq']
_is_numeric_dtype = True
freq = None
def __new__(cls, data=None, unit=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None,
closed=None, verify_integrity=True, **kwargs):
if isinstance(data, TimedeltaIndex) and freq is None:
if copy:
data = data.copy()
return data
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if com.is_float(periods):
periods = int(periods)
elif not com.is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, freq,
closed=closed)
if unit is not None:
data = to_timedelta(data, unit=unit, box=False)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if np.isscalar(data):
raise ValueError('TimedeltaIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# convert if not already
if getattr(data,'dtype',None) != _TD_DTYPE:
data = to_timedelta(data,unit=unit,box=False)
elif copy:
data = np.array(data,copy=True)
# check that we are matching freqs
if verify_integrity and len(data) > 0:
if freq is not None and not freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(index[0], None, len(index), name, freq)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed timedeltas does not '
'conform to passed frequency {1}'.format(inferred, freq.freqstr))
index.freq = freq
return index
if freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred:
index.freq = to_offset(inferred)
return index
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate(cls, start, end, periods, name, offset, closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
index = _generate_regular_range(start, end, periods, offset)
index = cls._simple_new(index, name=name, freq=offset)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
@property
def _box_func(self):
return lambda x: Timedelta(x,unit='ns')
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not getattr(values,'dtype',None):
values = np.array(values,copy=False)
if values.dtype == np.object_:
values = tslib.array_to_timedelta64(values)
if values.dtype != _TD_DTYPE:
values = com._ensure_int64(values).view(_TD_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.freq = freq
result._reset_identity()
return result
_na_value = tslib.NaT
"""The expected NA value to use with this index."""
@property
def _formatter_func(self):
from pandas.core.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def _format_footer(self):
tagline = 'Length: %d, Freq: %s'
return tagline % (len(self), self.freqstr)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(TimedeltaIndex, self).__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
else:
raise ValueError("cannot add the type {0} to a TimedeltaIndex".format(type(delta)))
result = TimedeltaIndex(new_values, freq='infer')
return result
def _evaluate_with_timedelta_like(self, other, op, opstr):
# allow division by a timedelta
if opstr in ['__div__','__truediv__']:
if _is_convertible_to_td(other):
other = Timedelta(other)
if isnull(other):
raise NotImplementedError("division by pd.NaT not implemented")
i8 = self.asi8
result = i8/float(other.value)
result = self._maybe_mask_results(result,convert='float64')
return Index(result,name=self.name,copy=False)
return NotImplemented
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
other = Timestamp(other)
i8 = self.asi8
result = i8 + other.value
result = self._maybe_mask_results(result,fill_value=tslib.iNaT)
return DatetimeIndex(result,name=self.name,copy=False)
def _sub_datelike(self, other):
raise TypeError("cannot subtract a datelike from a TimedeltaIndex")
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
from pandas.core.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
def _get_field(self, m):
values = self.asi8
hasnans = self.hasnans
if hasnans:
result = np.empty(len(self), dtype='float64')
mask = values == tslib.iNaT
imask = ~mask
result.flat[imask] = np.array([ getattr(Timedelta(val),m) for val in values[imask] ])
result[mask] = np.nan
else:
result = np.array([ getattr(Timedelta(val),m) for val in values ],dtype='int64')
return result
@property
def days(self):
""" The number of integer days for each element """
return self._get_field('days')
@property
def hours(self):
""" The number of integer hours for each element """
return self._get_field('hours')
@property
def minutes(self):
""" The number of integer minutes for each element """
return self._get_field('minutes')
@property
def seconds(self):
""" The number of integer seconds for each element """
return self._get_field('seconds')
@property
def milliseconds(self):
""" The number of integer milliseconds for each element """
return self._get_field('milliseconds')
@property
def microseconds(self):
""" The number of integer microseconds for each element """
return self._get_field('microseconds')
@property
def nanoseconds(self):
""" The number of integer nanoseconds for each element """
return self._get_field('nanoseconds')
@property
def components(self):
"""
Return a dataframe of the components of the Timedeltas
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days','hours','minutes','seconds','milliseconds','microseconds','nanoseconds']
hasnans = self.hasnans
if hasnans:
def f(x):
if isnull(x):
return [np.nan]*len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([ f(x) for x in self ])
result.columns = columns
if not hasnans:
result = result.astype('int64')
return result
def summary(self, name=None):
formatter = self._formatter_func
if len(self) > 0:
index_summary = ', %s to %s' % (formatter(self[0]),
formatter(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (com.pprint_thing(name),
len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def to_pytimedelta(self):
"""
Return TimedeltaIndex as object ndarray of datetime.timedelta objects
Returns
-------
datetimes : ndarray
"""
return tslib.ints_to_pytimedelta(self.asi8)
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
elif dtype == _INT64_DTYPE:
return self.asi8.copy()
elif dtype == _TD_DTYPE:
return self
elif dtype.kind == 'm':
# return an index (essentially this is division)
result = self.values.astype(dtype)
if self.hasnans:
return Index(self._maybe_mask_results(result,convert='float64'),name=self.name)
return Index(result.astype('i8'),name=self.name)
else: # pragma: no cover
raise ValueError('Cannot cast TimedeltaIndex to dtype %s' % dtype)
def union(self, other):
"""
        Specialized union for TimedeltaIndex objects. If combining
        overlapping ranges with the same DateOffset, this will be much
        faster than Index.union
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except TypeError:
pass
this, other = self, other
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, TimedeltaIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
return Index(com._concat_compat(to_concat), name=name)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq
and self._can_fast_union(other)):
joined = self._shallow_copy(joined)
joined.name = name
return joined
else:
return self._simple_new(joined, name)
def _can_fast_union(self, other):
if not isinstance(other, TimedeltaIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = com._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.name = getattr(obj, 'name', None)
self.freq = getattr(obj, 'freq', None)
self._reset_identity()
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
def intersection(self, other):
"""
Specialized intersection for TimedeltaIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _possibly_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
return _maybe_box(self, Index.get_value(self, series, key), series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self._engine.get_loc(key)
try:
return Index.get_loc(self, key)
except (KeyError, ValueError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return self._engine.get_loc(stamp)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
Returns
-------
bound : Timedelta or object
"""
if isinstance(label, compat.string_types):
parsed = _coerce_scalar_to_timedelta_type(label, box=True)
lbound = parsed.round(parsed.resolution)
if side == 'left':
return lbound
else:
return (lbound + _resolution_map[parsed.resolution]() -
Timedelta(1, 'ns'))
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
loc = self._partial_td_slice(key, freq, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, compat.string_types):
return key
parsed = _coerce_scalar_to_timedelta_type(key, box=True)
is_monotonic = self.is_monotonic
# figure out the resolution of the passed td
# and round to it
reso = parsed.resolution
t1 = parsed.round(reso)
t2 = t1 + _resolution_map[reso]() - Timedelta(1,'ns')
stamps = self.asi8
if is_monotonic:
# we are out of range
if len(stamps) and (
(use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or (
(use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))):
raise KeyError
# a monotonic (sorted) series can be sliced
left = stamps.searchsorted(t1.value, side='left') if use_lhs else None
right = stamps.searchsorted(t2.value, side='right') if use_rhs else None
return slice(left, right)
lhs_mask = (stamps >= t1.value) if use_lhs else True
rhs_mask = (stamps <= t2.value) if use_rhs else True
        # try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def __getitem__(self, key):
getitem = self._data.__getitem__
if np.isscalar(key):
val = getitem(key)
return Timedelta(val)
else:
if com._is_bool_indexer(key):
key = np.asarray(key)
if key.all():
key = slice(0,None,None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
result = getitem(key)
if result.ndim > 1:
return result
return self._simple_new(result, self.name)
@property
def freqstr(self):
""" return the frequency object as a string if its set, otherwise None """
if self.freq is None:
return None
return self.freq
def searchsorted(self, key, side='left'):
if isinstance(key, (np.ndarray, Index)):
key = np.array(key, dtype=_TD_DTYPE, copy=False)
else:
key = _to_m8(key)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
@property
def inferred_type(self):
return 'timedelta64'
@property
def dtype(self):
return _TD_DTYPE
@property
def is_all_dates(self):
return True
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'timedelta64'):
try:
other = TimedeltaIndex(other)
except:
return False
return np.array_equal(self.asi8, other.asi8)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
            if not either a Timedelta-like or a numpy integer-like, the returned
            Index dtype will be object rather than timedelta64.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except:
pass
freq = None
if isinstance(item, Timedelta):
# check freq can be preserved on edge cases
if self.freq is not None:
if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item)
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item,compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError("cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
"""
        Make a new TimedeltaIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : TimedeltaIndex
"""
new_tds = np.delete(self.asi8, loc)
freq = 'infer'
if lib.is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if com.is_list_like(loc):
loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
TimedeltaIndex._add_numeric_methods()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
""" return a boolean whether I can attempt conversion to a TimedeltaIndex """
if isinstance(other, TimedeltaIndex):
return True
elif (len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer','integer',
'mixed-integer-float', 'mixed')):
return True
return False
def _is_convertible_to_td(key):
return isinstance(key, (DateOffset, timedelta, Timedelta, np.timedelta64, compat.string_types))
def _to_m8(key):
'''
Timedelta-like => dt64
'''
if not isinstance(key, Timedelta):
# this also converts strings
key = Timedelta(key)
    # return a type that can be compared
return np.int64(key.value).view(_TD_DTYPE)
def _generate_regular_range(start, end, periods, offset):
stride = offset.nanos
if periods is None:
b = Timedelta(start).value
e = Timedelta(end).value
e += stride - e % stride
elif start is not None:
b = Timedelta(start).value
e = b + periods * stride
elif end is not None:
e = Timedelta(end).value + stride
b = e - periods * stride
else:
raise NotImplementedError
data = np.arange(b, e, stride, dtype=np.int64)
data = TimedeltaIndex._simple_new(data, None)
return data
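# Editor's note: the sketch below is illustrative and not part of the original module; it shows
# the nanosecond stride arithmetic used by _generate_regular_range with plain integers instead
# of Timedelta objects (the one-hour stride is an assumption for the example).
def _example_regular_range_arithmetic():
    start_ns = 0                 # e.g. Timedelta(0).value
    periods = 4
    stride = 3600 * 10 ** 9      # one hour in nanoseconds, like offset.nanos for an hourly offset
    end_ns = start_ns + periods * stride
    # four evenly spaced i8 ticks, exactly what _simple_new would wrap into an index
    return np.arange(start_ns, end_ns, stride, dtype=np.int64)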
def timedelta_range(start=None, end=None, periods=None, freq='D',
name=None, closed=None):
"""
Return a fixed frequency timedelta index, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
Left bound for generating dates
    end : string or timedelta-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
name : str, default None
Name of the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
Returns
-------
rng : TimedeltaIndex
"""
return TimedeltaIndex(start=start, end=end, periods=periods,
freq=freq, name=name,
closed=closed)
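# Editor's note: a minimal usage sketch (not part of the original module); the expected result
# is described in the comment rather than verified here.
def _example_timedelta_range_usage():
    # expected: a 4-element TimedeltaIndex covering 1..4 days with freq 'D'
    return timedelta_range(start='1 days', periods=4, freq='D')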
| 32.028884
| 106
| 0.574556
|
7782e529f7d9b33c44cd303006cef8b8198cde17
| 684
|
py
|
Python
|
backend/core/blueprints/logs/__init__.py
|
google/co-op-4-all
|
6bf68ea902da552e01c3647787f7212c541050e6
|
[
"Apache-2.0"
] | 3
|
2022-01-28T18:30:56.000Z
|
2022-03-30T17:39:05.000Z
|
backend/core/blueprints/logs/__init__.py
|
google/co-op-4-all
|
6bf68ea902da552e01c3647787f7212c541050e6
|
[
"Apache-2.0"
] | null | null | null |
backend/core/blueprints/logs/__init__.py
|
google/co-op-4-all
|
6bf68ea902da552e01c3647787f7212c541050e6
|
[
"Apache-2.0"
] | 1
|
2022-02-21T12:49:01.000Z
|
2022-02-21T12:49:01.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Blueprint
# Create Logs Blueprint
logs = Blueprint('logs', __name__)
from . import routes
| 36
| 74
| 0.76462
|
27fac59625edab7c0d38542078303c8031e9dfb1
| 15,302
|
py
|
Python
|
modules/coupling/fluid_flow0d.py
|
marchirschvogel/amb
|
af48b2a672cfcfb7a081020cda599fde85aa6b65
|
[
"BSD-4-Clause"
] | null | null | null |
modules/coupling/fluid_flow0d.py
|
marchirschvogel/amb
|
af48b2a672cfcfb7a081020cda599fde85aa6b65
|
[
"BSD-4-Clause"
] | null | null | null |
modules/coupling/fluid_flow0d.py
|
marchirschvogel/amb
|
af48b2a672cfcfb7a081020cda599fde85aa6b65
|
[
"BSD-4-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2019-2021, Dr.-Ing. Marc Hirschvogel
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import time, sys, math
import numpy as np
from dolfinx import fem
import ufl
from petsc4py import PETSc
import utilities
import solver_nonlin
import expression
from mpiroutines import allgather_vec
from fluid import FluidmechanicsProblem
from flow0d import Flow0DProblem
class FluidmechanicsFlow0DProblem():
def __init__(self, io_params, time_params_fluid, time_params_flow0d, fem_params, constitutive_models, model_params_flow0d, bc_dict, time_curves, coupling_params, io, mor_params={}, comm=None):
self.problem_physics = 'fluid_flow0d'
self.comm = comm
self.coupling_params = coupling_params
self.surface_vq_ids = self.coupling_params['surface_ids']
try: self.surface_p_ids = self.coupling_params['surface_p_ids']
except: self.surface_p_ids = self.surface_vq_ids
self.num_coupling_surf = len(self.surface_vq_ids)
try: self.cq_factor = self.coupling_params['cq_factor']
except: self.cq_factor = [1.]*self.num_coupling_surf
try: self.coupling_type = self.coupling_params['coupling_type']
except: self.coupling_type = 'monolithic_direct'
# assert that we do not have conflicting timings
time_params_flow0d['maxtime'] = time_params_fluid['maxtime']
time_params_flow0d['numstep'] = time_params_fluid['numstep']
# initialize problem instances (also sets the variational forms for the fluid problem)
self.pbs = FluidmechanicsProblem(io_params, time_params_fluid, fem_params, constitutive_models, bc_dict, time_curves, io, mor_params=mor_params, comm=self.comm)
self.pbf = Flow0DProblem(io_params, time_params_flow0d, model_params_flow0d, time_curves, coupling_params, comm=self.comm)
self.set_variational_forms_and_jacobians()
# defines the monolithic coupling forms for 0D flow and fluid mechanics
def set_variational_forms_and_jacobians(self):
self.cq, self.cq_old, self.dcq, self.dforce = [], [], [], []
self.coupfuncs, self.coupfuncs_old = [], []
if self.coupling_type == 'monolithic_lagrange':
# Lagrange multiplier stiffness matrix (currently treated with FD!)
self.K_lm = PETSc.Mat().createAIJ(size=(self.num_coupling_surf,self.num_coupling_surf), bsize=None, nnz=None, csr=None, comm=self.comm)
self.K_lm.setUp()
# Lagrange multipliers
self.lm, self.lm_old = self.K_lm.createVecLeft(), self.K_lm.createVecLeft()
# 3D fluxes
self.constr, self.constr_old = [], []
self.power_coupling, self.power_coupling_old = ufl.as_ufl(0), ufl.as_ufl(0)
# coupling variational forms and Jacobian contributions
for n in range(self.num_coupling_surf):
self.pr0D = expression.template()
self.coupfuncs.append(fem.Function(self.pbs.Vd_scalar)), self.coupfuncs_old.append(fem.Function(self.pbs.Vd_scalar))
self.coupfuncs[-1].interpolate(self.pr0D.evaluate), self.coupfuncs_old[-1].interpolate(self.pr0D.evaluate)
cq_, cq_old_ = ufl.as_ufl(0), ufl.as_ufl(0)
for i in range(len(self.surface_vq_ids[n])):
ds_vq = ufl.ds(subdomain_data=self.pbs.io.mt_b1, subdomain_id=self.surface_vq_ids[n][i], metadata={'quadrature_degree': self.pbs.quad_degree})
if self.coupling_params['coupling_quantity'][n] == 'flux':
assert(self.coupling_type == 'monolithic_direct')
cq_ += self.pbs.vf.flux(self.pbs.v, ds_vq)
cq_old_ += self.pbs.vf.flux(self.pbs.v_old, ds_vq)
elif self.coupling_params['coupling_quantity'][n] == 'pressure':
assert(self.coupling_type == 'monolithic_lagrange' and self.coupling_params['variable_quantity'][n] == 'flux')
cq_ += self.pbs.vf.flux(self.pbs.v, ds_vq)
cq_old_ += self.pbs.vf.flux(self.pbs.v_old, ds_vq)
else:
raise NameError("Unknown coupling quantity! Choose flux or pressure!")
self.cq.append(cq_), self.cq_old.append(cq_old_)
self.dcq.append(ufl.derivative(self.cq[-1], self.pbs.v, self.pbs.dv))
df_ = ufl.as_ufl(0)
for i in range(len(self.surface_p_ids[n])):
ds_p = ufl.ds(subdomain_data=self.pbs.io.mt_b1, subdomain_id=self.surface_p_ids[n][i], metadata={'quadrature_degree': self.pbs.quad_degree})
df_ += self.pbs.timefac*self.pbs.vf.surface(ds_p)
# add to fluid rhs contributions
self.power_coupling += self.pbs.vf.deltaP_ext_neumann_normal(self.coupfuncs[-1], ds_p)
self.power_coupling_old += self.pbs.vf.deltaP_ext_neumann_normal(self.coupfuncs_old[-1], ds_p)
self.dforce.append(df_)
# minus sign, since contribution to external power!
self.pbs.weakform_u += -self.pbs.timefac * self.power_coupling - (1.-self.pbs.timefac) * self.power_coupling_old
# add to fluid Jacobian
self.pbs.jac_uu += -self.pbs.timefac * ufl.derivative(self.power_coupling, self.pbs.v, self.pbs.dv)
if self.coupling_type == 'monolithic_lagrange':
# old Lagrange multipliers - initialize with initial pressures
self.pbf.cardvasc0D.initialize_lm(self.lm, self.pbf.time_params['initial_conditions'])
self.pbf.cardvasc0D.initialize_lm(self.lm_old, self.pbf.time_params['initial_conditions'])
def induce_perturbation(self):
if self.pbf.perturb_after_cylce > 0: # at least run through one healthy cycle
if self.pbf.ti.cycle[0] > self.pbf.perturb_after_cylce:
if self.comm.rank == 0:
print(">>> Induced cardiovascular disease type: %s" % (self.pbf.perturb_type))
sys.stdout.flush()
self.pbf.cardvasc0D.induce_perturbation(self.pbf.perturb_type, self.pbf.perturb_factor)
self.pbf.have_induced_pert = True
class FluidmechanicsFlow0DSolver():
def __init__(self, problem, solver_params_fluid, solver_params_flow0d):
self.pb = problem
self.solver_params_fluid = solver_params_fluid
self.solver_params_flow0d = solver_params_flow0d
# initialize nonlinear solver class
self.solnln = solver_nonlin.solver_nonlinear_constraint_monolithic(self.pb, self.pb.pbs.V_v, self.pb.pbs.V_p, self.solver_params_fluid, self.solver_params_flow0d)
def solve_problem(self):
start = time.time()
# print header
utilities.print_problem(self.pb.problem_physics, self.pb.pbs.comm, self.pb.pbs.ndof)
# read restart information
if self.pb.pbs.restart_step > 0:
self.pb.pbs.io.readcheckpoint(self.pb.pbs, self.pb.pbs.restart_step)
self.pb.pbf.readrestart(self.pb.pbs.simname, self.pb.pbs.restart_step)
self.pb.pbs.simname += '_r'+str(self.pb.pbs.restart_step)
# set pressure functions for old state - s_old already initialized by 0D flow problem
if self.pb.coupling_type == 'monolithic_direct':
self.pb.pbf.cardvasc0D.set_pressure_fem(self.pb.pbf.s_old, self.pb.pbf.cardvasc0D.v_ids, self.pb.pr0D, self.pb.coupfuncs_old)
if self.pb.coupling_type == 'monolithic_lagrange':
self.pb.pbf.cardvasc0D.set_pressure_fem(self.pb.lm_old, list(range(self.pb.num_coupling_surf)), self.pb.pr0D, self.pb.coupfuncs_old)
if self.pb.coupling_type == 'monolithic_direct':
# old 3D coupling quantities (volumes or fluxes)
for i in range(self.pb.num_coupling_surf):
cq = fem.assemble_scalar(self.pb.cq_old[i])
cq = self.pb.pbs.comm.allgather(cq)
self.pb.pbf.c.append(sum(cq)*self.pb.cq_factor[i])
if self.pb.coupling_type == 'monolithic_lagrange':
for i in range(self.pb.num_coupling_surf):
lm_sq, lm_old_sq = allgather_vec(self.pb.lm, self.pb.comm), allgather_vec(self.pb.lm_old, self.pb.comm)
self.pb.pbf.c.append(lm_sq[i])
con = fem.assemble_scalar(self.pb.cq_old[i])
con = self.pb.pbs.comm.allgather(con)
self.pb.constr.append(sum(con)*self.pb.cq_factor[i])
self.pb.constr_old.append(sum(con)*self.pb.cq_factor[i])
if bool(self.pb.pbf.chamber_models):
self.pb.pbf.y = []
for ch in ['lv','rv','la','ra']:
if self.pb.pbf.chamber_models[ch]['type']=='0D_elast': self.pb.pbf.y.append(self.pb.pbs.ti.timecurves(self.pb.pbf.chamber_models[ch]['activation_curve'])(self.pb.pbs.t_init))
if self.pb.pbf.chamber_models[ch]['type']=='0D_elast_prescr': self.pb.pbf.y.append(self.pb.pbs.ti.timecurves(self.pb.pbf.chamber_models[ch]['elastance_curve'])(self.pb.pbs.t_init))
if self.pb.pbf.chamber_models[ch]['type']=='0D_prescr': self.pb.pbf.c.append(self.pb.pbs.ti.timecurves(self.pb.pbf.chamber_models[ch]['prescribed_curve'])(self.pb.pbs.t_init))
# initially evaluate 0D model at old state
self.pb.pbf.cardvasc0D.evaluate(self.pb.pbf.s_old, self.pb.pbs.t_init, self.pb.pbf.df_old, self.pb.pbf.f_old, None, None, self.pb.pbf.c, self.pb.pbf.y, self.pb.pbf.aux_old)
# consider consistent initial acceleration
if self.pb.pbs.timint != 'static' and self.pb.pbs.restart_step == 0:
# weak form at initial state for consistent initial acceleration solve
weakform_a = self.pb.pbs.deltaP_kin_old + self.pb.pbs.deltaP_int_old - self.pb.pbs.deltaP_ext_old - self.pb.power_coupling_old
jac_a = ufl.derivative(weakform_a, self.pb.pbs.a_old, self.pb.pbs.dv) # actually linear in a_old
# solve for consistent initial acceleration a_old
self.solnln.solve_consistent_ini_acc(weakform_a, jac_a, self.pb.pbs.a_old)
# write mesh output
self.pb.pbs.io.write_output(self.pb.pbs, writemesh=True)
# fluid 0D flow main time loop
for N in range(self.pb.restart_step+1, self.pb.numstep_stop+1):
wts = time.time()
# current time
t = N * self.pb.pbs.dt
# offset time for multiple cardiac cycles
t_off = (self.pb.pbf.ti.cycle[0]-1) * self.pb.pbf.cardvasc0D.T_cycl # zero if T_cycl variable is not specified
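            # Worked example (editor's note): with T_cycl = 0.8 s and the solver currently in
            # cardiac cycle 3, t_off = (3-1)*0.8 = 1.6 s, so time-dependent functions and the
            # 0D model are evaluated at t - 1.6 s, restarting the cycle clock from zero.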
# set time-dependent functions
self.pb.pbs.ti.set_time_funcs(self.pb.pbs.ti.funcs_to_update, self.pb.pbs.ti.funcs_to_update_vec, t-t_off)
# activation curves for 0D chambers (if present)
self.pb.pbf.evaluate_activation(t-t_off)
# solve
self.solnln.newton(self.pb.pbs.v, self.pb.pbs.p, self.pb.pbf.s, t-t_off)
# get midpoint dof values for post-processing (has to be called before update!)
self.pb.pbf.cardvasc0D.midpoint_avg(self.pb.pbf.s, self.pb.pbf.s_old, self.pb.pbf.s_mid, self.pb.pbf.theta_ost), self.pb.pbf.cardvasc0D.midpoint_avg(self.pb.pbf.aux, self.pb.pbf.aux_old, self.pb.pbf.aux_mid, self.pb.pbf.theta_ost)
# write output
self.pb.pbs.io.write_output(self.pb.pbs, N=N, t=t)
# raw txt file output of 0D model quantities
if self.pb.pbf.write_results_every_0D > 0 and N % self.pb.pbf.write_results_every_0D == 0:
self.pb.pbf.cardvasc0D.write_output(self.pb.pbf.output_path_0D, t, self.pb.pbf.s_mid, self.pb.pbf.aux_mid, self.pb.pbs.simname)
# update time step - fluid and 0D model
self.pb.pbs.ti.update_timestep(self.pb.pbs.v, self.pb.pbs.v_old, self.pb.pbs.a_old, self.pb.pbs.p, self.pb.pbs.p_old, self.pb.pbs.ti.funcs_to_update, self.pb.pbs.ti.funcs_to_update_old, self.pb.pbs.ti.funcs_to_update_vec, self.pb.pbs.ti.funcs_to_update_vec_old)
self.pb.pbf.cardvasc0D.update(self.pb.pbf.s, self.pb.pbf.df, self.pb.pbf.f, self.pb.pbf.s_old, self.pb.pbf.df_old, self.pb.pbf.f_old, self.pb.pbf.aux, self.pb.pbf.aux_old)
# update old pressures on fluid
if self.pb.coupling_type == 'monolithic_direct':
self.pb.pbf.cardvasc0D.set_pressure_fem(self.pb.pbf.s_old, self.pb.pbf.cardvasc0D.v_ids, self.pb.pr0D, self.pb.coupfuncs_old)
if self.pb.coupling_type == 'monolithic_lagrange':
self.pb.lm.assemble(), self.pb.lm_old.axpby(1.0, 0.0, self.pb.lm)
self.pb.pbf.cardvasc0D.set_pressure_fem(self.pb.lm_old, list(range(self.pb.num_coupling_surf)), self.pb.pr0D, self.pb.coupfuncs_old)
# update old 3D fluxes
for i in range(self.pb.num_coupling_surf):
self.pb.constr_old[i] = self.pb.constr[i]
# solve time for time step
wte = time.time()
wt = wte - wts
# print to screen
self.pb.pbf.cardvasc0D.print_to_screen(self.pb.pbf.s_mid,self.pb.pbf.aux_mid)
# print time step info to screen
self.pb.pbf.ti.print_timestep(N, t, self.solnln.sepstring, self.pb.pbs.numstep, wt=wt)
# check for periodicity in cardiac cycle and stop if reached (only for syspul* models - cycle counter gets updated here)
is_periodic = self.pb.pbf.cardvasc0D.cycle_check(self.pb.pbf.s, self.pb.pbf.sTc, self.pb.pbf.sTc_old, t-t_off, self.pb.pbf.ti.cycle, self.pb.pbf.ti.cycleerror, self.pb.pbf.eps_periodic, check=self.pb.pbf.periodic_checktype, inioutpath=self.pb.pbf.output_path_0D, nm=self.pb.pbs.simname, induce_pert_after_cycl=self.pb.pbf.perturb_after_cylce)
# induce some disease/perturbation for cardiac cycle (i.e. valve stenosis or leakage)
if self.pb.pbf.perturb_type is not None and not self.pb.pbf.have_induced_pert: self.pb.induce_perturbation()
# write restart info - old and new quantities are the same at this stage
self.pb.pbs.io.write_restart(self.pb.pbs, N)
# write 0D restart info - old and new quantities are the same at this stage (except cycle values sTc)
if self.pb.pbs.io.write_restart_every > 0 and N % self.pb.pbs.io.write_restart_every == 0:
self.pb.pbf.writerestart(self.pb.pbs.simname, N)
if is_periodic:
if self.pb.comm.rank == 0:
print("Periodicity reached after %i heart cycles with cycle error %.4f! Finished. :-)" % (self.pb.pbf.ti.cycle[0]-1,self.pb.pbf.ti.cycleerror[0]))
sys.stdout.flush()
break
if self.pb.comm.rank == 0: # only proc 0 should print this
print('Time for computation: %.4f s (= %.2f min)' % ( time.time()-start, (time.time()-start)/60. ))
sys.stdout.flush()
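# Editor's note: the sketch below is illustrative and not part of the original module; it assumes
# that midpoint_avg implements the usual one-step theta rule, shown here for plain floats.
def _example_midpoint_average(s_new, s_old, theta):
    # s_mid = theta * s_new + (1 - theta) * s_old
    return theta * s_new + (1.0 - theta) * s_old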
| 52.765517
| 354
| 0.646582
|
84523088794badd63368a152bdb2b535d8a90380
| 8,438
|
py
|
Python
|
tensr-flow-cat-dog.py
|
ypraveen07/Neural_Network1
|
69cbcd42e8941979ac6a10e76e4aea327ebbff96
|
[
"Apache-2.0"
] | null | null | null |
tensr-flow-cat-dog.py
|
ypraveen07/Neural_Network1
|
69cbcd42e8941979ac6a10e76e4aea327ebbff96
|
[
"Apache-2.0"
] | null | null | null |
tensr-flow-cat-dog.py
|
ypraveen07/Neural_Network1
|
69cbcd42e8941979ac6a10e76e4aea327ebbff96
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import matplotlib.pyplot as plt
import os
cwd = os.getcwd()
print(cwd)
a = tf.truncated_normal([16,128,128,3])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.shape(a))
b=tf.reshape(a,[16,128*128*3])
sess.run(tf.shape(b))
print(a)
import os
os.system("python D:\\files and documents\\pybasics\\dataset.py")
print("loaded py file")
import dataset
print('ld Dataset')
#classes = ['dogs', 'cats']
#num_classes = len(classes)
#import dataset
import tensorflow as tf
import time
from datetime import timedelta
import math
import random
import numpy as np
import os
#Adding Seed so that random initialization is consistent
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
batch_size = 32
#Prepare input data
classes1 = os.listdir("D:\\files and documents\\pybasics\\evaluation")
classes = ['dog', 'cat']
num_classes = len(classes)
print(num_classes)
print(classes)
# 20% of the data will automatically be used for validation
validation_size = 0.2
img_size = 128
num_channels = 3
train_path="D:\\files and documents\\pybasics\\evaluation"
# We shall load all the training and validation images and labels into memory using openCV and use that during training
data = dataset.read_train_sets(train_path, img_size, classes1, validation_size=validation_size)
print(data)
print("Complete reading input data. Will Now print a snippet of it")
print("Number of files in Training-set:\t\t{}".format(len(data.train.labels)))
#
tf.summary.FileWriterCache.clear()
session = tf.Session()
# tf.summary.FileWriter('board_beginner',sess.graph) # magic board
logdir = "D:\\files and documents\\pybasics\\dt"
writer = tf.summary.FileWriter(logdir) # create writer
writer.add_graph(session.graph)
x = tf.placeholder(tf.float32, shape=[None, img_size,img_size,num_channels], name='x')
## labels
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, dimension=1)
##Network graph params
filter_size_conv1 = 3
num_filters_conv1 = 32
filter_size_conv2 = 3
num_filters_conv2 = 32
filter_size_conv3 = 3
num_filters_conv3 = 64
fc_layer_size = 128
def create_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def create_biases(size):
return tf.Variable(tf.constant(0.05, shape=[size]))
def create_convolutional_layer(input,
num_input_channels,
conv_filter_size,
num_filters):
## We shall define the weights that will be trained using create_weights function.
weights = create_weights(shape=[conv_filter_size, conv_filter_size, num_input_channels, num_filters])
## We create biases using the create_biases function. These are also trained.
biases = create_biases(num_filters)
## Creating the convolutional layer
layer = tf.nn.conv2d(input=input,
filter=weights,
strides=[1, 1, 1, 1],
padding='SAME')
layer += biases
## We shall be using max-pooling.
layer = tf.nn.max_pool(value=layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
## Output of pooling is fed to Relu which is the activation function for us.
layer = tf.nn.relu(layer)
tf.summary.histogram("weight",weights)
tf.summary.histogram("Bias",biases )
tf.summary.histogram("activation",layer)
return layer
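# Editor's note: the helper below is illustrative and not part of the original script; with
# 'SAME' padding and stride-1 convolutions, only the 2x2/stride-2 max-pooling halves the spatial
# size, so 128 -> 64 -> 32 -> 16 after the three conv layers, and the flattened feature vector
# is expected to have 16 * 16 * 64 = 16384 elements.
def _example_conv_output_shape():
    size = img_size
    for _ in range(3):          # three conv+pool blocks defined in this script
        size //= 2
    return size * size * num_filters_conv3  # expected: 16384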
def create_flatten_layer(layer):
#We know that the shape of the layer will be [batch_size img_size img_size num_channels]
# But let's get it from the previous layer.
layer_shape = layer.get_shape()
    ## Number of features will be img_height * img_width * num_channels. But we shall calculate it instead of hard-coding it.
num_features = layer_shape[1:4].num_elements()
## Now, we Flatten the layer so we shall have to reshape to num_features
layer = tf.reshape(layer, [-1, num_features])
return layer
def create_fc_layer(input,
num_inputs,
num_outputs,
use_relu=True):
#Let's define trainable weights and biases.
weights = create_weights(shape=[num_inputs, num_outputs])
biases = create_biases(num_outputs)
# Fully connected layer takes input x and produces wx+b.Since, these are matrices, we use matmul function in Tensorflow
layer = tf.matmul(input, weights) + biases
if use_relu:
layer = tf.nn.relu(layer)
tf.summary.histogram("weight",weights)
tf.summary.histogram("Bias",biases )
tf.summary.histogram("activation",layer)
return layer
layer_conv1 = create_convolutional_layer(input=x,
num_input_channels=num_channels,
conv_filter_size=filter_size_conv1,
num_filters=num_filters_conv1)
layer_conv2 = create_convolutional_layer(input=layer_conv1,
num_input_channels=num_filters_conv1,
conv_filter_size=filter_size_conv2,
num_filters=num_filters_conv2)
layer_conv3= create_convolutional_layer(input=layer_conv2,
num_input_channels=num_filters_conv2,
conv_filter_size=filter_size_conv3,
num_filters=num_filters_conv3)
layer_flat = create_flatten_layer(layer_conv3)
layer_fc1 = create_fc_layer(input=layer_flat,
num_inputs=layer_flat.get_shape()[1:4].num_elements(),
num_outputs=fc_layer_size,
use_relu=True)
layer_fc2 = create_fc_layer(input=layer_fc1,
num_inputs=fc_layer_size,
num_outputs=num_classes,
use_relu=False)
y_pred = tf.nn.softmax(layer_fc2,name='y_pred')
y_pred_cls = tf.argmax(y_pred, dimension=1)
session.run(tf.global_variables_initializer())
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=layer_fc2,
labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session.run(tf.global_variables_initializer())
tf.summary.scalar("cross-entropy", cross_entropy)
tf.summary.scalar("accuracy",accuracy)
def show_progress(epoch, feed_dict_train, feed_dict_validate, val_loss):
acc = session.run(accuracy, feed_dict=feed_dict_train)
val_acc = session.run(accuracy, feed_dict=feed_dict_validate)
msg = "Training Epoch {0} --- Training Accuracy: {1:>6.1%}, Validation Accuracy: {2:>6.1%}, Validation Loss: {3:.3f}"
print(msg.format(epoch + 1, acc, val_acc, val_loss))
total_iterations = 0
saverx = tf.train.Saver()
def train(num_iteration):
global total_iterations
for i in range(total_iterations,
total_iterations + num_iteration):
x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(batch_size)
feed_dict_tr = {x: x_batch,
y_true: y_true_batch}
feed_dict_val = {x: x_valid_batch,
y_true: y_valid_batch}
session.run(optimizer, feed_dict=feed_dict_tr)
if i % int(data.train.num_examples/batch_size) == 0:
#s=session.run(merged_summary,feed_dict={x: x_batch, y_true: y_true_batch})
# write.add_summary(s,i)
val_loss = session.run(cost, feed_dict=feed_dict_val)
epoch = int(i / int(data.train.num_examples/batch_size))
show_progress(epoch, feed_dict_tr, feed_dict_val, val_loss)
saverx.save(session, "D:\\files and documents\\pybasics\\dt\\dog-cat-model")
total_iterations += num_iteration
train(num_iteration=100)
| 25.569697
| 126
| 0.651221
|
3e543d1c1a5cb06dc39364838f05f4310a58c14d
| 780
|
py
|
Python
|
Commands/OpenConfig.py
|
Libai2333/LitsQuestions
|
be98f74a7909325416848d97e16fe17028e19c98
|
[
"MIT"
] | null | null | null |
Commands/OpenConfig.py
|
Libai2333/LitsQuestions
|
be98f74a7909325416848d97e16fe17028e19c98
|
[
"MIT"
] | null | null | null |
Commands/OpenConfig.py
|
Libai2333/LitsQuestions
|
be98f74a7909325416848d97e16fe17028e19c98
|
[
"MIT"
] | null | null | null |
from Commands.Base import LitsQuestionsCommand
from PythonSheep.FileSheep.AutoOpen import AutoOpen
from WareHouse import wareHouse
class OpenConfig(LitsQuestionsCommand):
def run(self, userNowUsingLanguage:str, mainWareHouse:wareHouse):
AutoOpenControler = AutoOpen()
print(mainWareHouse.languagesContents[userNowUsingLanguage]["commandsMessage"]["openConfig"]["opening_TipsMessage"])
AutoOpenControler.UniversalFileOpen_App("./Config/GlobalSittings.json")
# 处理 Press Enter key continue
print(mainWareHouse.languagesContents[userNowUsingLanguage]["commandsMessage"]["openConfig"]["openComplete_TipsMessage"])
input(mainWareHouse.languagesContents[userNowUsingLanguage]["globalMessageTips"]["anyKeyContinue_TipsMessage"])
| 41.052632
| 129
| 0.791026
|
0e4252c0c4a980bbc828c01bdb71892bee54e971
| 5,131
|
py
|
Python
|
thonny/plugins/about.py
|
webduino-cn/thonny
|
74da2278aa018eafec697c2b92e2355237669ecd
|
[
"MIT"
] | 1
|
2021-06-12T22:24:40.000Z
|
2021-06-12T22:24:40.000Z
|
Thonny/Lib/site-packages/thonny/plugins/about.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 30
|
2019-01-04T10:14:56.000Z
|
2020-10-12T14:00:31.000Z
|
Thonny/Lib/site-packages/thonny/plugins/about.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 3
|
2018-11-24T14:00:30.000Z
|
2019-07-02T02:32:26.000Z
|
# -*- coding: utf-8 -*-
import datetime
import platform
import tkinter as tk
import tkinter.font
import webbrowser
from tkinter import ttk
import thonny
from thonny import get_workbench, ui_utils
from thonny.misc_utils import get_python_version_string
from thonny.ui_utils import CommonDialog
class AboutDialog(CommonDialog):
def __init__(self, master):
super().__init__(master)
main_frame = ttk.Frame(self)
main_frame.grid(sticky=tk.NSEW, ipadx=15, ipady=15)
main_frame.rowconfigure(0, weight=1)
main_frame.columnconfigure(0, weight=1)
self.title(_("About Thonny"))
self.resizable(height=tk.FALSE, width=tk.FALSE)
self.protocol("WM_DELETE_WINDOW", self._ok)
# bg_frame = ttk.Frame(self) # gives proper color in aqua
# bg_frame.grid()
heading_font = tkinter.font.nametofont("TkHeadingFont").copy()
heading_font.configure(size=19, weight="bold")
heading_label = ttk.Label(
main_frame, text="Thonny " + thonny.get_version(), font=heading_font
)
heading_label.grid()
url = "https://thonny.org"
url_font = tkinter.font.nametofont("TkDefaultFont").copy()
url_font.configure(underline=1)
url_label = ttk.Label(
main_frame, text=url, style="Url.TLabel", cursor="hand2", font=url_font
)
url_label.grid()
url_label.bind("<Button-1>", lambda _: webbrowser.open(url))
if platform.system() == "Linux":
try:
                import distro # distro doesn't need to be installed
system_desc = distro.name(True)
except ImportError:
system_desc = "Linux"
if "32" not in system_desc and "64" not in system_desc:
system_desc += " " + self.get_os_word_size_guess()
else:
system_desc = (
platform.system() + " " + platform.release() + " " + self.get_os_word_size_guess()
)
platform_label = ttk.Label(
main_frame,
justify=tk.CENTER,
text=system_desc
+ "\n"
+ "Python "
+ get_python_version_string()
+ "Tk "
+ ui_utils.get_tk_version_str(),
)
platform_label.grid(pady=20)
credits_label = ttk.Label(
main_frame,
text=_(
"Made in\n"
+ "University of Tartu, Estonia,\n"
+ "with the help from\n"
+ "open-source community,\n"
+ "Raspberry Pi Foundation\n"
+ "and Cybernetica AS"
),
style="Url.TLabel",
cursor="hand2",
font=url_font,
justify="center",
)
credits_label.grid()
credits_label.bind(
"<Button-1>",
lambda _: webbrowser.open("https://github.com/thonny/thonny/blob/master/CREDITS.rst"),
)
license_font = tkinter.font.nametofont("TkDefaultFont").copy()
license_font.configure(size=7)
license_label = ttk.Label(
main_frame,
text="Copyright (©) "
+ str(datetime.datetime.now().year)
+ " Aivar Annamaa\n"
+ _(
"This program comes with\n"
+ "ABSOLUTELY NO WARRANTY!\n"
+ "It is free software, and you are welcome to\n"
+ "redistribute it under certain conditions, see\n"
+ "https://opensource.org/licenses/MIT\n"
+ "for details"
),
justify=tk.CENTER,
font=license_font,
)
license_label.grid(pady=20)
ok_button = ttk.Button(main_frame, text="OK", command=self._ok, default="active")
ok_button.grid(pady=(0, 15))
ok_button.focus_set()
self.bind("<Return>", self._ok, True)
self.bind("<Escape>", self._ok, True)
def _ok(self, event=None):
self.destroy()
def get_os_word_size_guess(self):
if "32" in platform.machine() and "64" not in platform.machine():
return "(32-bit)"
elif "64" in platform.machine() and "32" not in platform.machine():
return "(64-bit)"
else:
return ""
def load_plugin() -> None:
def open_about(*args):
ui_utils.show_dialog(AboutDialog(get_workbench()))
def open_url(url):
# webbrowser.open returns bool, but add_command expects None
webbrowser.open(url)
get_workbench().add_command(
"changelog",
"help",
_("Version history"),
lambda: open_url("https://github.com/thonny/thonny/blob/master/CHANGELOG.rst"),
group=60,
)
get_workbench().add_command(
"issues",
"help",
_("Report problems"),
lambda: open_url("https://github.com/thonny/thonny/issues/new"),
group=60,
)
get_workbench().add_command("about", "help", _("About Thonny"), open_about, group=61)
# For Mac
get_workbench().createcommand("tkAboutDialog", open_about)
| 31.869565
| 98
| 0.565777
|
dc83787eba0b68c3ef84c7307e4a781395672900
| 8,448
|
py
|
Python
|
examples/_attic/adapt_agent/adapt_agent.py
|
hiway/python-zentropi
|
006f4a6de8b6691477fa1416476cd6cef665c918
|
[
"Apache-2.0"
] | 5
|
2017-05-28T18:15:38.000Z
|
2021-07-15T22:31:33.000Z
|
examples/_attic/adapt_agent/adapt_agent.py
|
hiway/python-zentropi
|
006f4a6de8b6691477fa1416476cd6cef665c918
|
[
"Apache-2.0"
] | null | null | null |
examples/_attic/adapt_agent/adapt_agent.py
|
hiway/python-zentropi
|
006f4a6de8b6691477fa1416476cd6cef665c918
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import datetime
from pprint import pprint
from string import punctuation
import os
import random
import re
import yaml
from adapt.intent import IntentBuilder
from adapt.engine import IntentDeterminationEngine
from chronyk import chronyk
from dateparser import parse
from zentropi import Agent, Frame, KINDS, on_event, on_message
from zentropi.handlers import Handler, HandlerRegistry
from zentropi.utils import StopAgent, run_agents_forever
Intent = IntentBuilder
def get_synsets(phrases):
from nltk.corpus import wordnet
for phrase in phrases:
for synset in wordnet.synsets(phrase):
for lemma in synset.lemmas():
name = lemma.name()
if '_' in name:
continue
yield name
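# Editor's note: a minimal usage sketch (not part of the original agent); it assumes the NLTK
# wordnet corpus has already been downloaded (nltk.download('wordnet')) and simply collects the
# single-word synonyms of "weather" produced by the generator above.
def _example_get_synsets():
    return sorted(set(get_synsets(["weather"])))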
class Entity(object):
def __init__(self, name, *phrases, regex: str = None, expand=0):
self.name = name
self.phrases = list(phrases)
self.regex = regex
if not phrases and not regex:
self.phrases = [name]
if expand:
synsets = get_synsets(self.phrases[:3])
self.phrases += synsets
        # de-duplicate phrases; order is not significant
        self.phrases = set(self.phrases)
def __repr__(self):
return 'Entity(name={!r}, regex={!r}, phrases={!r})'.format(self.name, self.regex, list(self.phrases)[:3])
def bootstrap_adapt(intents, entities):
engine = IntentDeterminationEngine()
for entity in entities:
for phrase in entity.phrases:
engine.register_entity(phrase, entity.name)
if entity.regex:
engine.register_regex_entity(entity.regex)
for intent in intents:
engine.register_intent_parser(intent.build())
return engine
class AdaptAgent(Agent):
def __init__(self, name=None):
super().__init__(name=name)
self._intent_registry = HandlerRegistry()
self.engine = None
self.entities = set()
self.intents = set()
@on_event('*** started')
def startup(self, event):
for handler in self._intent_registry.handler_objects:
for entity in handler._handler.entities:
self.entities.add(entity)
self.intents.add(handler._handler.intent)
self.engine = bootstrap_adapt(self.intents, self.entities)
@on_message('*')
def process_all_messages(self, message):
if not self.engine:
return
text = message.text
if not text:
return
for intent in self.engine.determine_intent(text):
if intent.get('confidence') > 0.7:
frame = Frame(intent.get('intent_type'))
frame, handlers = self._intent_registry.match_exact(frame)
handler = list(handlers)[0]
message.data.update(intent.items())
return handler(message)
def on_intent(self, intent, *entities):
def wrapper(handler):
name_ = intent.name
handler.intent = intent
handler.entities = entities or []
handler_obj = Handler(name=name_, handler=handler, kind=KINDS.MESSAGE)
self._intent_registry.add_handler(name_, handler_obj)
return handler
return wrapper
RESPONSES = {
'greet': {
'*': ['hey', 'hi', 'hello'],
'morning': ['good morning!'],
'afternoon': ['good afternoon!'],
'evening': ['good evening!'],
# 'night': ['good night'],
# 'bye': ['see you', 'until later', 'bye'],
},
'smalltalk': {
'*': ['wassup?', 'how is it going?', 'how can I help you?', ':)']
}
}
class Response(object):
def __init__(self):
self._text = ''
self._greet = False
self._finalized = False
self._sentences = 0
@staticmethod
def has_context(tag, context):
return context in RESPONSES[tag]
@staticmethod
def one_of(tag, context='*'):
return str(random.choice(RESPONSES[tag][context]))
@property
def text(self):
# if not self._finalized:
text_ = self._text.strip()
if self._greet:
if text_[-1] == ',':
text_ = text_[:-1]
else:
if text_[-1] == ',':
text_ = text_[:-1] + '.'
elif text_[-1] not in punctuation:
text_ = text_ + '.'
return text_
def greet(self, context=None):
if self._greet:
raise AssertionError('Already greeted.')
tag = 'greet'
if context and self.has_context(tag, context):
greeting_ = self.one_of(tag, context)
else:
greeting_ = self.one_of(tag)
if self._text:
self._text = '{}, {}'.format(greeting_.capitalize(), self._text)
else:
self._text = '{}'.format(greeting_.capitalize())
self._greet = True
return self
def say(self, text: str, punctuate=True):
text = text.strip()
if not text:
return
if not self._greet:
text = text.capitalize()
text_ = self._text
if punctuate and text_ and text_[-1] not in punctuation:
if self._greet and self._sentences == 0:
if text[0] in [':', ';']:
self._text = '{} {}'.format(self._text, text)
else:
self._text = '{}, {}'.format(self._text, text)
else:
self._text = '{}. {}'.format(self._text, text)
else:
self._text = '{} {}'.format(self._text, text)
self._sentences += 1
return self
def random(self, tag, context):
context = context or '*'
return self.say(self.one_of(tag, context))
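# Editor's note: a minimal usage sketch of the Response builder above (not part of the original
# agent); the exact wording is random, but the result is expected to look roughly like
# "Good morning! how can I help you?" (capitalised greeting, then a smalltalk phrase).
def _example_response_usage():
    return Response().greet('morning').say(Response.one_of('smalltalk')).text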
# ---- main ----
agent = AdaptAgent()
def parse_datetime(datetime_str: str, allow_future=True, allow_past=True):
date_time = chronyk.Chronyk(datetime_str, allowfuture=allow_future, allowpast=allow_past)
return date_time
@agent.on_intent(Intent('greeting_intent').require('greeting').optionally('greeting_mod').optionally('greeting_context'))
def greeting(message):
response = Response()
context = message.data.greeting_context
return response.greet(context).text
@agent.on_intent(Intent('greeting_intent_1').optionally('greeting_mod').require('greeting_context'))
def greeting_1(message):
response = Response()
context = message.data.greeting_context
return response.greet(context).text
@agent.on_intent(Intent('sunset').require('question_things').require('sunset') \
.optionally('day_modifier').optionally('day'))
def sunset(message):
pprint(message.data)
when_str = message.data.day or ''
if message.data.day_modifier:
when_str = message.data.day_modifier + ' ' + when_str
when = parse(when_str)
if not when:
return str(agent.city.sun(date=datetime.datetime.now(), local=True)['sunset'])
return str(agent.city.sun(date=when, local=True)['sunset'])
@agent.on_intent(Intent('weather_intent').require('question_things').require('weather').optionally('day'))
def weather(message):
wtype = message.data.weather_type
pprint(message.data)
return 'Weather is nice...'
@agent.on_intent(Intent('about_intent').require('question_agents').require('subject'))
def generic_about_question(message):
pprint(message.data)
return 'About that...'
@agent.on_intent(Intent('reminder_intent').require('reminder_task'))
def set_reminder(message):
pprint(message.data)
match = re.search(r'[\d]+[dmhs]', message.text)
if not match:
return 'Include a time-delta: (number)(d/h/m/s)'
time_delta_str = match.group()
task = str.replace(message.text, time_delta_str, '')
task = task.replace(message.data.reminder_task, '')
task = task.strip()
if not task:
return 'You did not mention what to remind you about...'
return 'I will try to remember... {}'.format(task)
def load_entities(file_path: str):
with open(os.path.abspath(file_path)) as infile:
entities_ = yaml.safe_load(infile.read())
return [Entity(name, *words, expand=0) for name, words in entities_.items()]
for entity in load_entities('./entities.yml'):
agent.entities.add(entity)
run_agents_forever(agent, shell=True)
| 31.879245
| 121
| 0.611624
|
53bf3165dbbbba13f90a1d4facec243d2db22863
| 7,444
|
py
|
Python
|
rabbitMQ/rbt.py
|
huynhp24/Project-Theia
|
cfc0eba342c27050905e0ec34267b37356bfa725
|
[
"MIT"
] | null | null | null |
rabbitMQ/rbt.py
|
huynhp24/Project-Theia
|
cfc0eba342c27050905e0ec34267b37356bfa725
|
[
"MIT"
] | 3
|
2021-04-23T18:00:00.000Z
|
2021-05-03T21:41:26.000Z
|
rabbitMQ/rbt.py
|
huynhp24/Project-Theia
|
cfc0eba342c27050905e0ec34267b37356bfa725
|
[
"MIT"
] | null | null | null |
import threading
import time
import pika, sys, os
import boto3
from PIL import Image
import urllib.parse
import re
from urllib.request import Request, urlopen
from io import BytesIO
import shutil
import requests
sys.path.insert(1,'/opt/theia/serverside')
import labels, textdetect, Nat_Lang_Gen, translate
# from serverside import labels, textdetect
import json, time
import mysql.connector
import configparser
from os import path
import logging
from logging.handlers import RotatingFileHandler
# Reading config file
config = configparser.ConfigParser()
config.read('/opt/theia/config.ini')
config.sections()
try:
if path.exists(sys.argv[1]):
config.read(sys.argv[1])
except IndexError:
if path.exists('/opt/theia/config.ini'):
config.read('/opt/theia/config.ini')
elif path.exists('config.ini'):
config.read('config.ini')
else:
print("No config file found")
# setting up logging
logfile = config['logging']['logdir'] + "/rabbit_py.log"
log_lvl = config['logging']['loglevel']
log_out = config['logging']['log_stream_to_console']
my_handler = RotatingFileHandler(logfile,
mode='a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None, delay=0)
my_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(funcName)s (%(lineno)d) %(message)s'))
l = logging.getLogger(__name__)
l.setLevel(log_lvl.upper())
l.addHandler(my_handler)
if log_out.upper() == 'TRUE':
l.addHandler(logging.StreamHandler())
l.info("Starting backing end processors")
s3 = boto3.client('s3')
S3PATH = config['amazon']['bucket']
REGION = config['amazon']['region']
SOURCEDIR = config['default']['image_upload_folder']
# Setup Database Connection
db_host = config['database']['host']
db_port = config['database']['port']
db_user = config['database']['user']
db_password = config['database']['password']
db_dbname = config['database']['dbname']
def storeToDB(imgFile, id, lan):
conn = mysql.connector.connect(user= db_user, password= db_password,
host= db_host,
database= db_dbname)
cur = conn.cursor()
# checking if database's connection was successful
if (conn):
l.info("Database connection successful")
url = "https://%s.s3-%s.amazonaws.com/%s" % (S3PATH, REGION, imgFile)
filestamp = time.strftime('%Y-%m-%d-%I:%M')
with open('label.json', 'r') as f:
labelResult = json.load(f)
labelJson = json.dumps(labelResult)
with open('imgText.json', 'r') as f:
textResult = json.load(f)
# make json file from dict to string
textJson = json.dumps(textResult)
sen = Nat_Lang_Gen.Run(labelResult, textResult)
audio_file, translate_text = translate.textToSpeech(sen, id, lan)
l.info("Incoming url " + str(audio_file))
tsql = "insert into jsondata(uuid, image_Location, label_list, detect_text, sentence, audio_Location, file_date) values (%s, %s, %s, %s, %s, %s, %s)"
cur.execute(tsql, (id, url, labelJson, textJson, translate_text, audio_file, filestamp))
l.info('Storing into database: ' + str(id) + ', ' + str(url) + ', ' + str(translate_text) + ', ' + str(audio_file ))
conn.commit()
        # remove the image file from the server once it has been uploaded to the S3 bucket, so the disk does not fill up
os.remove(imgFile)
else:
l.error("Database connection unsuccesful.")
cur.close()
conn.close()
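# Editor's note: an illustrative helper (not part of the original service) using the same URL
# layout as storeToDB above; whether such a URL is reachable depends on the bucket's region and
# public-access settings, so treat it as a string-formatting sketch only.
def _example_public_s3_url(bucket, region, key):
    return "https://%s.s3-%s.amazonaws.com/%s" % (bucket, region, key)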
def imgPathToS3(imgPath, uuid, lan):
imgFile = os.path.basename(imgPath)
print('--------')
path = os.path.join(SOURCEDIR, imgFile)
l.info('Joining the path for img upload to rabbitMQ dir: ' + path)
with open(imgFile, "rb") as f:
s3.upload_fileobj(f, S3PATH, imgFile)
l.info('Had successfully upload ' + imgPath + " to s3 bucket : " + S3PATH)
textInImage = textdetect.detect_text(imgFile, S3PATH)
labelResult = labels.detect_labels(imgFile, S3PATH)
with open('label.json', 'w') as jf:
json.dump(labelResult, jf)
with open('imgText.json', 'w') as j:
json.dump(textInImage, j)
storeToDB(imgFile, uuid, lan)
def checkingImgURL(img, uuid, lan):
try:
req = Request(img, headers={'User-Agent': 'Mozilla/5.0'}) # to unblock server security
response = requests.get(img)
l.info(" Responsing url request: " +str(response))
url = Image.open(BytesIO(response.content))
except:
        # Warning: this is not an image file. Please double-check. Only .png, .jpg and .jpeg are accepted.
        l.warning(" Not an image file. Please double-check the URL")
else:
webpage = urlopen(req).read()
        # this if statement strips any trailing string after the image extension (.png/.jpg/.jpeg)
if (img.find('.png') or img.find('.jpg') or img.find('.jpeg')):
            # re.findall returns the matching image url(s) as a list
url = re.findall(r'(?:http\:|https\:)?\/\/.*\.(?:png|jpg|jpeg)', img)
imURL = ''.join(url)
imgFile = os.path.basename(imURL)
with open(imgFile, 'wb') as f:
f.write(webpage)
imgurl = Image.open(imgFile)
imgurl.close()
l.info(" The URL provided is an image: " + imgFile)
imgPathToS3(imgFile, uuid, lan)
else:
l.error(" Not an image file. Only accpet URL ends with .png or .jpg or .jpeg")
def receive(rmq_q):
while True:
try:
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue=rmq_q)
def callback(ch, method, properties, body):
img = body.decode("utf-8")
l.info(rmq_q)
# convert string to dictionary
res = json.loads(img)
imgname = res['msg']
uuid = res['uuid']
lan = res['language']
l.info(" receiving UUID : " + uuid)
l.info(" receiving language : " + lan)
l.info(" Incoming msg: " + imgname + " sending from " + rmq_q)
if (rmq_q == 'image_url'):
checkingImgURL(imgname, uuid, lan)
else:
l.info('imagePath')
imgPathToS3(imgname, uuid, lan)
l.info('*******************')
channel.basic_consume(queue=rmq_q, on_message_callback=callback, auto_ack=True)
l.info('[*] Waiting for messages in the ' + rmq_q + ' queue. To exit press CTRL+C')
channel.start_consuming()
except:
l.exception("Consumer for: " + rmq_q + " died unexpectedly. Restarting in 5 seconds...")
time.sleep(5)
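# Editor's note: an illustrative publisher sketch (not part of the original service); it shows the
# message shape the callback above expects ({"msg": ..., "uuid": ..., "language": ...}) published
# with pika. The localhost broker and the default queue name are assumptions for the example.
def _example_publish(imgname, uuid, lan, queue="image_path"):
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    channel.queue_declare(queue=queue)
    channel.basic_publish(exchange='', routing_key=queue,
                          body=json.dumps({"msg": imgname, "uuid": uuid, "language": lan}))
    connection.close()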
def main():
try:
l.info("Starting rabbitMQ backend server...")
# creating thread
queue = "image_url"
t1 = threading.Thread(target = receive, args=(queue,))
t1.start()
queue = "image_path"
t2 = threading.Thread(target = receive, args = (queue,))
t2.start()
except:
l.error("Unable to start thread")
return False
else:
l.info("end")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Exiting')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 32.938053
| 157
| 0.602499
|
2627671d4a137c47e7f15063f5bd7f6726472c61
| 6,999
|
py
|
Python
|
traj/src/traj/synchronize_joint_motion.py
|
jonbinney/trajectory_smoothing
|
0e2b8d7d646c96c0c22eef1371bcd42d169121dc
|
[
"Apache-2.0"
] | 8
|
2020-03-04T07:49:44.000Z
|
2021-09-08T08:32:40.000Z
|
traj/src/traj/synchronize_joint_motion.py
|
iron-ox/trajectory_smoothing
|
4e9f45b3c31f254e8443936fd0cdb1940c022460
|
[
"Apache-2.0"
] | 21
|
2019-05-07T22:32:14.000Z
|
2020-12-30T23:26:07.000Z
|
traj/src/traj/synchronize_joint_motion.py
|
jonbinney/trajectory_smoothing
|
0e2b8d7d646c96c0c22eef1371bcd42d169121dc
|
[
"Apache-2.0"
] | 8
|
2019-04-24T23:44:09.000Z
|
2021-09-07T08:16:57.000Z
|
#!/usr/bin/env python
import math
import numpy as np
import traj
import rospy
def synchronize_joint_motion(t_syn, pos_diff, v_start, v_end, abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk):
'''
this function selects a motion profile for a general trajectory segment considering the total motion time of the segment is "t_syn"
it returns the jerk_value && duration associated with each phase of the segment
    it raises an error if:
        1. the given time "t_syn" is less than the minimum time calculated using the maximum jerk
        2. the given position difference "pos_diff" is reached before the final velocity "v_end" can be reached
        3. the combination of the position difference "pos_diff", the final velocity "v_end", and the motion time "t_syn"
gives non-monotonic motion
this function is based on the same idea of the paper entitled :
Online Trajectory Generation: Basic Concepts for Instantaneous Reactions to Unforeseen Events[1],
section V, synchronization steps 1,2,3
[1] https://www-cs.stanford.edu/groups/manips/publications/pdfs/Kroeger_2010_TRO.pdf
'''
abs_v_start = abs(v_start)
abs_v_end = abs(v_end)
jm = abs_max_jrk
# calculate all variables that determine which equation will be used for synchronization
tj_2vf, ta_2vf, tj, ta, tv = traj.traj_segment_planning(0.0, pos_diff, abs_v_start, abs_v_end, abs_max_vel, abs_max_acc, abs_max_jrk)
min_pos_2vf, acc_2vf, tj_2vf, ta_2vf = traj.calculate_min_pos_reached_acc_jrk_time_acc_time_to_reach_final_vel( v_start, v_end, abs_max_vel, abs_max_acc, abs_max_jrk)
# assign new values for tj, ta, tv
pd_eq_vel = pos_diff - min_pos_2vf
t_eq_vel = t_syn - (2*tj_2vf + ta_2vf)
tav = ta
tvv = tv
tjv = (t_eq_vel - 2*tav - tvv) / 4.0
if tj == 0.0 and ta== 0.0:
tjv = t_eq_vel / 4.0
tav = 0.0
tvv = 0.0
# choose a motion profile
case = 1
rospy.logdebug(">> synchronize_jt_7phs case 1")
v = max(abs_v_start, abs_v_end)
jk = -(2*tav*v - pd_eq_vel + 4*tjv*v + tvv*v)/(tav**2*tjv + 3*tav*tjv**2 + tvv*tav*tjv + 2*tjv**3 + tvv*tjv**2)
a1 = jk*tjv
a2 = a1
    v1 = jk*tjv*tjv/2 + v
v2 = a1*tav + v1
v3 = -jk*tjv*tjv/2 + a2*tjv + v2
rospy.logdebug(">> jk, a1, v3: {}, {}, {}".format( jk, a1, v3))
if abs(jk) > abs_max_jrk or v3 < 0.0 or v3 > abs_max_vel or abs(a1)> abs_max_acc:
rospy.logdebug( ">> synchronize_jt_7phs case 2" )
case = 2
v = min(abs_v_start, abs_v_end)
jk = -(2*tav*v - pd_eq_vel + 4*tjv*v + tvv*v)/(tav**2*tjv + 3*tav*tjv**2 + tvv*tav*tjv + 2*tjv**3 + tvv*tjv**2)
a1 = jk*tjv
a2 = a1
        v1 = jk*tjv*tjv/2 + v
v2 = a1*tav + v1
v3 = -jk*tjv*tjv/2 + a2*tjv + v2
rospy.logdebug(">> jk, a1, v3: {}, {}, {}".format( jk, a1, v3))
if abs(jk) > abs_max_jrk or v3 < 0.0 or v3 > abs_max_vel or abs(a1)> abs_max_acc:
raise ValueError("synchronize_jt_7phs: motion is not feasible")
    # calculate jrk_sgn_dur according to the case
if case == 1:
if abs_v_end < abs_v_start:
jrk_sgn_dur = [(jk, tjv), (0.0, tav), (-jk, tjv), (0.0, tvv), (-jk,tjv), (0.0, tav), (jk, tjv),
(-jm, tj_2vf), (0.0, ta_2vf), (jm, tj_2vf)]
else:
jrk_sgn_dur = [(jm, tj_2vf), (0.0, ta_2vf), (-jm, tj_2vf),
(jk, tjv), (0.0, tav), (-jk, tjv), (0.0, tvv), (-jk,tjv), (0.0, tav), (jk, tjv)]
elif case == 2:
if abs_v_end > abs_v_start:
jrk_sgn_dur = [(jk, tjv), (0.0, tav), (-jk, tjv), (0.0, tvv), (-jk,tjv), (0.0, tav), (jk, tjv),
(jm, tj_2vf), (0.0, ta_2vf), (-jm, tj_2vf)]
else:
jrk_sgn_dur = [(-jm, tj_2vf), (0.0, ta_2vf), (jm, tj_2vf),
(jk, tjv), (0.0, tav), (-jk, tjv), (0.0, tvv), (-jk,tjv), (0.0, tav), (jk, tjv)]
return jrk_sgn_dur
def motion_direction( v_start, v_end, pos_diff):
'''
this function checks the direction of the motion based on the starting/ending velocity and the position difference
    if the position difference is not aligned with the direction of the starting/ending velocity it raises an error
'''
# positive_motion_case:
if v_start >= 0 and v_end >= 0 and pos_diff >=0:
return 1
# negative_motion_case:
elif v_start <= 0 and v_end <= 0 and pos_diff <=0:
return -1
# complex_motion_case:
else:
raise ValueError("identify_motion_direction: motion is not feasible")
return 0
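# Editor's note: a minimal usage sketch (not part of the original module) showing the sign
# convention of motion_direction: non-negative velocities with a forward position difference give
# +1, the mirrored case gives -1, and mixed signs raise a ValueError.
def _example_motion_direction():
    return motion_direction(0.1, 0.0, 0.5), motion_direction(-0.1, 0.0, -0.5)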
def segment_synchronization(pos_start, pos_end, vel_start, vel_end,
abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk):
'''
A high level segment synchronization function based on the "synchronize_joint_motion" function.
    it is used to synchronize an n-DOF segment.
this function is based on the same idea of the paper entitled :
Online Trajectory Generation: Basic Concepts for Instantaneous Reactions to Unforeseen Events[1],
section V, synchronization steps 1,2,3
[1] https://www-cs.stanford.edu/groups/manips/publications/pdfs/Kroeger_2010_TRO.pdf
'''
rospy.logdebug(">> pos_start:\n{}".format(pos_start))
rospy.logdebug(">> pos_end:\n{}".format(pos_end))
rospy.logdebug(">> vel_start:\n{}".format(vel_start))
rospy.logdebug(">> vel_end:\n{}".format(vel_end))
pos_diff = [pf-pi for pi, pf in zip(pos_start, pos_end)]
motion_dir = []
n_jts = len(pos_diff)
for jt in range(n_jts):
motion_dir.append(traj.motion_direction(vel_start[jt], vel_end[jt], pos_diff[jt]))
# step 1: find the minimum time motion for each joints
min_motion_time = [ ]
for jt in range(n_jts):
        # minimum time for this joint's segment: sum of the phase durations
tj_2vf, ta_2vf, t_jrk, t_acc, t_vel = traj.traj_segment_planning(0.0, abs(pos_diff[jt]), abs(vel_start[jt]), abs(vel_end[jt]),
abs_max_vel[jt], abs_max_acc[jt], abs_max_jrk[jt])
min_time = 2*tj_2vf + ta_2vf + 4*t_jrk + 2*t_acc + t_vel
min_motion_time.append(min_time)
    # step 2: find the joint with the largest minimum motion time (the reference joint)
ref_jt = min_motion_time.index(max(min_motion_time))
min_sync_time = max(min_motion_time)
syn_t = min_sync_time
rospy.logdebug(">> syn_t : {} ".format(syn_t))
rospy.logdebug(">> ref_jt: {} ".format(ref_jt))
rospy.logdebug(">> min_T : {} ".format(min_motion_time))
# step 3: calculate new jrk_sgn_dur
phase_dur_jt = []
phase_jrk_jt = []
for jt in range(n_jts):
rospy.logdebug("\n\n>> jt:{}, PD: {}, v_start:{}, v_end:{}".format(jt, pos_diff[jt], vel_start[jt], vel_end[jt]))
p_diff = abs(pos_diff[jt])
v_start = abs(vel_start[jt])
v_end = abs(vel_end[jt])
if jt == ref_jt:
jrk_sign_dur = traj.calculate_jerk_sign_and_duration(0.0, p_diff, v_start, v_end,
abs_max_pos[jt], abs_max_vel[jt], abs_max_acc[jt], abs_max_jrk[jt])
else:
jrk_sign_dur = synchronize_joint_motion(syn_t, p_diff, v_start, v_end,
abs_max_pos[jt], abs_max_vel[jt], abs_max_acc[jt], abs_max_jrk[jt])
dur = [jsd[1] for jsd in jrk_sign_dur]
jrk = [motion_dir[jt]*jsd[0] for jsd in jrk_sign_dur]
phase_dur_jt.append(dur)
phase_jrk_jt.append(jrk)
rospy.logdebug(">> dur:{}".format(sum(dur)))
return min_sync_time, phase_dur_jt, phase_jrk_jt
| 43.74375
| 167
| 0.675096
|
9520d49478732e22c744b658635a91be123057eb
| 2,718
|
py
|
Python
|
domain/bitbucket.py
|
keshrisohit/devops_metrics
|
47252869a9154763d86e170be792cdd804c52871
|
[
"MIT"
] | 6
|
2020-02-12T04:44:09.000Z
|
2021-09-09T17:02:21.000Z
|
domain/bitbucket.py
|
keshrisohit/devops_metrics
|
47252869a9154763d86e170be792cdd804c52871
|
[
"MIT"
] | 2
|
2019-12-30T08:44:09.000Z
|
2021-06-02T00:50:15.000Z
|
domain/bitbucket.py
|
keshrisohit/devops_metrics
|
47252869a9154763d86e170be792cdd804c52871
|
[
"MIT"
] | 2
|
2019-12-30T14:35:51.000Z
|
2021-04-05T07:45:01.000Z
|
import requests
from config import BITBUCKET_CLIENT_ID, BITBUCKET_SECRET_KEY
from domain.utils import get_access_token
client_id = BITBUCKET_CLIENT_ID
client_secret = BITBUCKET_SECRET_KEY
token_url = "https://bitbucket.org/site/oauth2/access_token"
class BitbucketClient(object):
def __init__(self, access_token=None):
self.access_token = access_token
if not self.access_token:
self.access_token = get_access_token(client_id, client_secret, token_url)
def pull_request_commit_list(self, commit_url):
_next = commit_url
commits_list = []
while True:
values, _next = self._get_pull_request_commits(_next)
commits_list.extend(values)
if not _next:
break
return commits_list
def _get_pull_request_commits(self, commits_url):
next = None
values = []
try:
response = self.call(commits_url)
response_data = response.json()
if 'next' in response_data:
next = response_data['next']
values = response.json()['values']
except Exception as e:
print(e)
return values, next
def _get_pull_request_diff_count(self, url):
lines_added = 0
lines_removed = 0
files_changed = 0
diff_stat_url = '{}{}'.format(url, 'stat')
try:
response = self.call(diff_stat_url)
next = None
response_data = response.json()
if 'next' in response_data:
                next = response_data['next']
values = response.json()['values']
for value in values:
lines_removed += value['lines_removed']
files_changed += 1
lines_added += value['lines_added']
except Exception as e:
print(e)
return lines_added, lines_removed, next, files_changed
def traverse_diff_count(self, url):
_next = url
while True:
lines_added, lines_removed, _next, files_changed = self._get_pull_request_diff_count(
_next)
yield lines_added, lines_removed, files_changed
if not _next:
break
def call(self, url):
response = None
try:
response = requests.get(url, headers={'Authorization': 'Bearer {}'.format(self.access_token)})
if response.status_code == 401:
# access token might have expired
self.access_token = get_access_token(client_id, client_secret, token_url)
                response = self.call(url)
except Exception as e:
print(e)
return response
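# --- Illustrative usage sketch (not part of the original module) ---
# Assumes valid BITBUCKET_CLIENT_ID / BITBUCKET_SECRET_KEY in config; the URLs below
# are placeholders for real Bitbucket Cloud 2.0 API pull-request endpoints.
#
#   client = BitbucketClient()
#   commits = client.pull_request_commit_list(
#       "https://api.bitbucket.org/2.0/repositories/<workspace>/<repo>/pullrequests/1/commits")
#   for added, removed, changed in client.traverse_diff_count(
#           "https://api.bitbucket.org/2.0/repositories/<workspace>/<repo>/pullrequests/1/diff"):
#       print(added, removed, changed)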
| 28.3125
| 106
| 0.593819
|
c60638c594da2e29459e99c040c213553859bc04
| 700
|
py
|
Python
|
sls/ec2_alarms_api/create_ec2_alarms/tests/bdd/steps/steps.py
|
aws-samples/amazon-ec2-cloudwatch-alarms-sls
|
199d6500797ff32d9cbad966e24cdc40184ed56b
|
[
"MIT-0"
] | null | null | null |
sls/ec2_alarms_api/create_ec2_alarms/tests/bdd/steps/steps.py
|
aws-samples/amazon-ec2-cloudwatch-alarms-sls
|
199d6500797ff32d9cbad966e24cdc40184ed56b
|
[
"MIT-0"
] | null | null | null |
sls/ec2_alarms_api/create_ec2_alarms/tests/bdd/steps/steps.py
|
aws-samples/amazon-ec2-cloudwatch-alarms-sls
|
199d6500797ff32d9cbad966e24cdc40184ed56b
|
[
"MIT-0"
] | null | null | null |
"""
Contains behave step implementation
"""
# pylint: disable = import-error,no-name-in-module,C0413,missing-function-docstring,wrong-import-order
import os
from behave import when, given
from ec2_alarms_api.common_bdd import common_steps
from sls.ec2_alarms_api.create_ec2_alarms.index import handler
THISDIR = os.path.dirname(__file__) # steps/
BDD_DIR = os.path.dirname(THISDIR) # bdd
@when(u'we invoke the api')
def invoke_api(context):
common_steps.invoke_api(context, 'PUT', handler)
@given(u'{operating_system} ec2 instance tagged with ec2_hostname running in the account')
def instance_running(context, operating_system):
common_steps.set_hostname(context, operating_system)
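# Illustrative feature-file snippet (wording assumed from the step decorators above;
# the project's real .feature files are not shown here, and "windows" is only an
# example value for the {operating_system} placeholder):
#
#   Scenario: Create alarms for a tagged EC2 instance
#     Given windows ec2 instance tagged with ec2_hostname running in the account
#     When we invoke the api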
| 29.166667
| 102
| 0.791429
|
7e7d491d8329f4b704c29271d9e8edaedb010c8a
| 3,262
|
py
|
Python
|
Addition/PythonPlotter/Valkyrie/plot_centroid.py
|
shbang91/PnC
|
880cbbcf96a48a93a0ab646634781e4f112a71f6
|
[
"MIT"
] | null | null | null |
Addition/PythonPlotter/Valkyrie/plot_centroid.py
|
shbang91/PnC
|
880cbbcf96a48a93a0ab646634781e4f112a71f6
|
[
"MIT"
] | null | null | null |
Addition/PythonPlotter/Valkyrie/plot_centroid.py
|
shbang91/PnC
|
880cbbcf96a48a93a0ab646634781e4f112a71f6
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os
figure_number = 0
col_index = 0
row_index = 0
file_path = os.getcwd() + "/../../../ExperimentDataCheck/"
## read files
data_com_pos = \
np.genfromtxt(file_path+'com_pos.txt', delimiter=None, dtype=(float))
data_com_pos_des = \
np.genfromtxt(file_path+'com_pos_des.txt', delimiter=None, dtype=(float))
data_com_vel = \
np.genfromtxt(file_path+'com_vel.txt', delimiter=None, dtype=(float))
data_com_vel_des = \
np.genfromtxt(file_path+'com_vel_des.txt', delimiter=None, dtype=(float))
data_centroid_momentum = \
np.genfromtxt(file_path+'cm.txt', delimiter=None, dtype=(float))
data_centroid_momentum_des = \
np.genfromtxt(file_path+'cm_des.txt', delimiter=None, dtype=(float))
data_x = np.genfromtxt(file_path+'time.txt', delimiter='\n', dtype=(float))
st_idx = 1
end_idx = len(data_x) - 10
data_x = data_x[st_idx:end_idx]
data_phse = np.genfromtxt(file_path+'phase.txt', delimiter=None, dtype=(float))
data_phse = data_phse[st_idx:end_idx]
phseChange = []
for i in range(0,len(data_x)-1):
if data_phse[i] != data_phse[i+1]:
phseChange.append(i)
else:
pass
axes = plt.gca()
## plot com pos
fig = plt.figure(figure_number)
fig.canvas.set_window_title('com pos')
for i in range(1,4,1):
ax1 = plt.subplot(3, 1, i)
plt.plot(data_x, data_com_pos[st_idx:end_idx,i-1], "b-")
plt.plot(data_x, data_com_pos_des[st_idx:end_idx,i-1], "r-")
plt.grid(True)
for j in phseChange:
plt.axvline(x=data_x[j],color='indigo',linestyle='-')
plt.text(data_x[j],ax1.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')
plt.xlabel('time (sec)')
figure_number += 1
## plot com vel
fig = plt.figure(figure_number)
fig.canvas.set_window_title('com vel')
for i in range(1,4,1):
ax1 = plt.subplot(3, 1, i)
plt.plot(data_x, data_com_vel[st_idx:end_idx,i-1], "b-")
plt.plot(data_x, data_com_vel_des[st_idx:end_idx,i-1], "r-")
plt.grid(True)
for j in phseChange:
plt.axvline(x=data_x[j],color='indigo',linestyle='-')
plt.text(data_x[j],ax1.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')
plt.xlabel('time (sec)')
figure_number += 1
# plot amom
fig = plt.figure(figure_number)
fig.canvas.set_window_title('amom')
for i in range(1,4,1):
ax1 = plt.subplot(3, 1, i)
plt.plot(data_x, data_centroid_momentum[st_idx:end_idx,i-1], "b-")
plt.plot(data_x, data_centroid_momentum_des[st_idx:end_idx,i-1], "r-")
plt.grid(True)
for j in phseChange:
plt.axvline(x=data_x[j],color='indigo',linestyle='-')
plt.text(data_x[j],ax1.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')
plt.xlabel('time (sec)')
figure_number += 1
## plot lmom
fig = plt.figure(figure_number)
fig.canvas.set_window_title('lmom')
for i in range(1,4,1):
ax1 = plt.subplot(3, 1, i)
plt.plot(data_x, data_centroid_momentum[st_idx:end_idx,i+2], "b-")
plt.plot(data_x, data_centroid_momentum_des[st_idx:end_idx,i+2], "r-")
plt.grid(True)
for j in phseChange:
plt.axvline(x=data_x[j],color='indigo',linestyle='-')
plt.text(data_x[j],ax1.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')
plt.xlabel('time (sec)')
figure_number += 1
plt.show()
| 30.773585
| 80
| 0.679951
|
5624f74ab0f575f9434587b84cb28d1d4ce51f9d
| 44,375
|
py
|
Python
|
train.py
|
bug0306/Sign-language-recognition-based-on-TensorFlow
|
3467e03f28f037f64787e8c8712ed7c7a9ffabfc
|
[
"MIT"
] | null | null | null |
train.py
|
bug0306/Sign-language-recognition-based-on-TensorFlow
|
3467e03f28f037f64787e8c8712ed7c7a9ffabfc
|
[
"MIT"
] | null | null | null |
train.py
|
bug0306/Sign-language-recognition-based-on-TensorFlow
|
3467e03f28f037f64787e8c8712ed7c7a9ffabfc
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
#from datetime import datetime
import hashlib
import os.path
import random
import re
import struct
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""
Brief:
Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
result = {}
sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
print('WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
      # We want to ignore anything after '_nohash_' in the file name when
      # deciding which set to put an image in, so that the data set creator has a
      # way of grouping photos that are close variations of each other. For
      # example, this is used in the plant disease data set to group multiple
      # pictures of the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
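# Illustrative sketch (not part of the original script): the stable split above can be
# reproduced for a single file; the file name and resulting percentage below are
# hypothetical examples only.
#
#   hash_name = 'cat_001.jpg'   # file name after stripping any '_nohash_' suffix
#   h = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
#   percentage = (int(h, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) * (100.0 / MAX_NUM_IMAGES_PER_CLASS)
#   # 'percentage' is a stable value between 0 and 100, so e.g. 7.3 with
#   # validation_percentage=10 always sends this file to the validation set.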
def get_image_path(image_lists, label_name, index, image_dir, category):
""""
Brief:
Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training images.
category: Name string of set to pull images from - training, testing, or validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.', label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category):
""""
Brief:
Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
def create_inception_graph():
""""
Brief:
Creates a graph from saved GraphDef file and returns a Graph object.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Graph().as_default() as graph:
model_filename = os.path.join(FLAGS.model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
tf.import_graph_def(graph_def, name='', return_elements=[
BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
RESIZED_INPUT_TENSOR_NAME]))
return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor, bottleneck_tensor):
""""
Brief:
Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
bottleneck_values = sess.run(
bottleneck_tensor,
{image_data_tensor: image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract():
"""
Brief:
Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""
Brief:
Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def write_list_of_floats_to_file(list_of_floats, file_path):
"""
Brief:
Writes a given list of floats to a binary file.
Args:
list_of_floats: List of floats we want to write to a file.
file_path: Path to a file where list of floats will be stored.
"""
s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
with open(file_path, 'wb') as f:
f.write(s)
def read_list_of_floats_from_file(file_path):
"""
Brief:
Reads list of floats from a given file.
Args:
file_path: Path to a file where list of floats was stored.
Returns:
Array of bottleneck values (list of floats).
"""
with open(file_path, 'rb') as f:
s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())
return list(s)
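# Illustrative round trip (not part of the original script; the path is hypothetical):
#
#   vec = [0.0] * BOTTLENECK_TENSOR_SIZE
#   write_list_of_floats_to_file(vec, '/tmp/example_bottleneck.bin')
#   assert read_list_of_floats_from_file('/tmp/example_bottleneck.bin') == vec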
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
print('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
try:
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, bottleneck_tensor)
except:
raise RuntimeError('Error during processing file %s' % image_path)
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor):
"""
Brief:
Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
bottleneck_tensor: The output tensor for the bottleneck values.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
print('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor):
"""
Brief:
Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
bottleneck_tensor: The penultimate output layer of the graph.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
bottleneck_tensor):
"""
Brief:
Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""
Brief:
Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and
    # might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
resized_input_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""
Brief:
Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness):
"""
Brief:
Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
     <       width      >
    +---------------------+
    |                     |
    |   width - crop%     |
    |    <      >         |
    |    +------+         |
    |    |      |         |
    |    |      |         |
    |    |      |         |
    |    +------+         |
    |                     |
    |                     |
    +---------------------+
Scaling
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
MODEL_INPUT_DEPTH])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
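# Illustrative arithmetic (not part of the original script), using hypothetical flag
# values --random_crop=10 and --random_scale=20:
#
#   margin_scale = 1.0 + 10/100.0 = 1.1
#   resize_scale is drawn uniformly from [1.0, 1.0 + 20/100.0] = [1.0, 1.2]
#   scale_value  = margin_scale * resize_scale            # somewhere in [1.1, 1.32]
#   pre-crop size = scale_value * 299 per side, from which a random 299x299 window
#   is cropped, optionally flipped left/right, and brightness-scaled.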
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
"""
Brief:
Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count],
stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
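# Illustrative shape check (not part of the original script): with the Inception v3
# bottleneck (BOTTLENECK_TENSOR_SIZE = 2048) and, say, 5 classes:
#
#   bottleneck_input : [None, 2048]
#   layer_weights    : [2048, 5]
#   layer_biases     : [5]
#   logits           : [None, 5]  ->  softmax  ->  final_tensor : [None, 5]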
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""
Brief:
Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def main(_):
# Setup the directory we'll write summaries to for TensorBoard
if tf.io.gfile.Exists(FLAGS.summaries_dir):
tf.io.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.io.gfile.MakeDirs(FLAGS.summaries_dir)
# Set up the pre-trained graph.
maybe_download_and_extract()
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
create_inception_graph())
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
print('Only one valid folder of images found at ' + FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
with tf.Session(graph=graph) as sess:
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop,
FLAGS.random_scale, FLAGS.random_brightness)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(len(image_lists.keys()),
FLAGS.final_tensor_name,
bottleneck_tensor)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('Step: %d, Train accuracy: %.4f%%, Cross entropy: %f, Validation accuracy: %.1f%% (N=%d)' % (i,
train_accuracy * 100, cross_entropy_value, validation_accuracy * 100, len(validation_bottlenecks)))
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,
'testing', FLAGS.bottleneck_dir,
FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%% (N=%d)' % (
test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
print('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
print('%70s %s' % (test_filename,
list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as
# constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='logs/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--output_labels',
type=str,
default='logs/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='logs/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=5000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=100,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type=str,
default='logs/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
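# --- Illustrative follow-up (not part of the original script) ---
# After training, the frozen graph written to --output_graph can be reloaded for
# inference: with the defaults above, a raw JPEG string is fed to
# 'DecodeJpeg/contents:0' and '<final_tensor_name>:0' (default 'final_result:0') is
# fetched to obtain class probabilities ordered as in --output_labels.  The exact
# tensor names depend on the flags chosen at training time.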
| 43.848814
| 123
| 0.63707
|
19b3822e55c6f577f53eb97c1b40ba38566011df
| 893
|
py
|
Python
|
homeassistant/components/whois/diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/whois/diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/whois/diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Diagnostics support for Whois."""
from __future__ import annotations
from typing import Any
from whois import Domain
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: DataUpdateCoordinator[Domain] = hass.data[DOMAIN][entry.entry_id]
return {
"creation_date": coordinator.data.creation_date,
"expiration_date": coordinator.data.expiration_date,
"last_updated": coordinator.data.last_updated,
"status": coordinator.data.status,
"statuses": coordinator.data.statuses,
"dnssec": coordinator.data.dnssec,
}
| 31.892857
| 82
| 0.753639
|
6fe18832819d5f17cc7866edfe0bb749be63d394
| 73,968
|
py
|
Python
|
src/NZGBplugin/Resources.py
|
strk/gazetteer
|
7c1a46827aaef47ffebe10f7d9dde1bbf477e6fe
|
[
"MIT"
] | null | null | null |
src/NZGBplugin/Resources.py
|
strk/gazetteer
|
7c1a46827aaef47ffebe10f7d9dde1bbf477e6fe
|
[
"MIT"
] | null | null | null |
src/NZGBplugin/Resources.py
|
strk/gazetteer
|
7c1a46827aaef47ffebe10f7d9dde1bbf477e6fe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
################################################################################
#
# New Zealand Geographic Board gazetteer application,
# Crown copyright (c) 2020, Land Information New Zealand on behalf of
# the New Zealand Government.
#
# This file is released under the MIT licence. See the LICENCE file found
# in the top-level directory of this distribution for more information.
#
################################################################################
# Resource object code
#
# Created: Thu Nov 8 14:21:46 2012
# by: The Resource Compiler for PyQt (Qt v4.7.3)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x04\xf4\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x04\x71\x49\x44\
\x41\x54\x58\x85\xcd\x97\x7f\x4c\xd4\x65\x1c\xc7\x5f\xcf\xdd\x17\
\xee\x98\x23\x17\xbf\xa3\xa0\xe9\xd2\x74\x44\x04\x59\x9c\x78\xf8\
\xa3\xc9\x86\x5a\x0c\x4b\xd8\x5a\xfe\x28\x17\x96\x6d\x16\x35\x0b\
\x9d\x0b\x93\x8c\xe1\x56\x49\x63\x69\xb5\xe9\x14\xe1\x1f\x28\x21\
\x45\x66\x2a\x92\x29\x2a\x12\xb7\xb1\xeb\x0a\xcf\xc5\xcc\x0d\x44\
\xf9\xa1\xcb\x23\xef\xc7\x17\x9e\xfe\x80\x43\xe8\x80\x3b\x8e\x66\
\xbd\xb7\x67\xbb\xe7\x79\x3e\xcf\xf3\x7e\x3d\xdf\xef\xe7\xfb\xfd\
\xde\x47\x48\x29\x19\x29\xb1\x43\x44\x00\xd9\x40\xd2\x50\x8b\x62\
\x6a\xea\x04\x4c\x43\xad\x42\x6e\x97\x37\x47\xf9\x8d\x04\x10\x05\
\x22\x4b\x48\xb1\x47\x22\xc3\x00\x14\xad\x22\x43\xf4\xa1\x62\x2a\
\xee\xbd\xf6\x1e\xa9\xf6\xab\x02\x40\x20\xba\xa5\x90\x6f\xc9\x7c\
\x59\xe9\x01\x20\x76\x88\xaf\x81\x0d\x81\xda\x40\x99\x9b\xbc\x45\
\xa4\xcd\x5c\x4e\x7c\x78\x02\x3a\x45\x3f\x15\x7f\x1c\xaa\x1d\x73\
\x57\x0b\x27\xdb\x6a\x29\x6e\x2c\x92\xce\x7e\xa7\x00\xbe\x91\xdb\
\xe5\x1b\xc3\x00\xa2\x40\x64\x21\xa9\x98\x1d\x3a\x97\x83\x19\x95\
\xcc\x09\x8b\x9b\x92\xe9\x78\x6a\xed\xb6\xb0\xee\x48\x16\xd6\x9e\
\xdf\x40\x90\x2d\xf3\x65\xa5\xe0\x23\x22\x04\xc2\x12\xa0\x0d\x08\
\x3d\xb3\xd6\x24\x26\x6b\xee\x74\x40\xa0\xee\x5e\xff\xd8\x95\x6a\
\x56\x57\xaf\x04\xa0\x2c\xb3\x8a\x15\xb3\x32\x3d\x20\x16\x95\x26\
\x49\x57\xbf\xab\x47\x22\xe3\x34\x40\xb6\x44\x86\xe5\x26\x6f\x99\
\xb4\x39\x0c\x9a\xd7\x9d\xfa\xd3\xe7\xf8\x39\x61\x71\xe4\x26\x6f\
\x11\x43\x79\x96\xad\x61\x30\xd3\x49\x9b\xb9\x7c\xd2\xe6\x6e\xcd\
\x4c\xb5\xb1\xee\xf6\x7a\x9a\x9d\x2d\x3e\xc5\x8f\xf0\x4a\xd2\x00\
\x49\x8a\x56\x91\xf1\xe1\x09\x7e\x03\xcc\xd0\x45\x63\x74\x65\xd0\
\xde\x15\x4e\x6f\xaf\xf4\x1a\x1f\x1f\x9e\x80\xa2\x55\x24\x90\xa4\
\x00\x51\x21\xfa\x50\xe1\x6f\xb6\x1f\xbb\x52\x0d\x40\x34\xf0\xc5\
\xa7\x3f\xf1\xf0\xd2\xf3\xc3\x73\xcd\xd7\x1b\x3d\xe2\x57\xcc\xca\
\x44\xa7\xe8\x09\xd1\x87\x8a\x9b\x7d\x37\xa2\x14\xbf\x5c\x47\xc8\
\x9d\x70\x00\x3c\x06\x4d\x57\xef\x75\x77\x37\x16\x79\xc4\xdf\x7a\
\x7f\xf4\x15\xd2\x4c\x15\x60\xaa\xf2\xe9\x0a\x9c\xae\x3b\x41\x90\
\x3e\x88\xf9\x0b\x52\x3d\xe6\xca\x32\xab\x86\x7f\x9b\x5c\x66\x6a\
\x6e\xef\xc7\xda\x70\x15\x80\xf8\x94\x18\x96\x46\xaf\xe4\x69\x75\
\x89\xff\x00\xf6\xbb\x77\xd9\xfc\xce\x5a\x6c\x7d\x36\x4a\xcb\x8f\
\x63\x48\x31\x8e\x9a\x77\x3f\xe7\x6d\xce\x0e\x5a\xa6\x5d\x20\xc6\
\xaa\xc7\x3a\x34\x67\x9c\xfe\x22\xdb\x62\x8b\xd1\x6a\xc7\xdf\xdf\
\xeb\x2d\xd8\x55\xb8\x0d\xc3\xfc\x3b\x1c\xfe\xae\x8f\x57\xd7\x2c\
\xe3\xe2\xf9\x73\x63\xc6\x39\xf4\xb7\xd0\xda\xb5\xa3\xce\xb4\x40\
\xb7\x78\x42\x73\xaf\x00\xbf\x5a\xcc\x94\x1d\xfa\x8a\xdd\x9f\xff\
\x85\xd1\x08\xdf\x56\xda\xc6\x85\x98\x3b\x10\x47\x9e\xb6\x10\x5d\
\x5f\xbc\xb7\x33\xf9\x06\x20\xa5\xe4\xdd\x4d\xab\xf9\x64\xa7\x83\
\xc8\xc8\xc1\xb1\xb1\x20\x7e\xb1\x35\xd3\x70\xdd\x84\xa9\x49\x1d\
\x5c\x37\xe0\xfb\x5b\x11\x26\xc8\x81\x03\xfb\xf6\xa2\xd5\xb6\x91\
\x93\x33\x30\x6a\xdc\x0d\xb1\x2a\x6b\x19\x6f\xe7\x16\xd0\xfe\xb8\
\x95\x80\x44\x3b\x97\x35\x1a\x32\xca\x0b\x08\x36\x26\xd0\x98\xf7\
\x31\xb3\x07\x12\xfd\x07\xb8\x79\xa3\x93\xc2\x9d\x79\x9c\xae\xb3\
\x21\xc6\xf8\x37\x60\x34\x42\x75\x95\x8d\x7d\xfb\xb7\x62\x3a\xaa\
\x62\xb5\x0c\x80\x46\xc3\xad\xd9\x4d\xcc\xbd\x98\x48\xef\xcb\x7d\
\x90\xe2\x93\xff\xd8\x00\x5b\x3f\xd8\xc0\xfa\xd7\x9c\xc4\x4f\x70\
\x3b\x0d\x06\x30\x18\x1c\xc3\xfd\x8e\x8e\x7e\x2c\x16\x33\x25\x25\
\x66\x7e\xac\x9f\xe1\xf1\xb4\xf8\x0c\x50\x77\xf2\x38\x67\xcf\x9c\
\xe0\xd0\x41\xa7\x4f\x1b\xb8\x15\x1d\x3d\xd8\x1a\x1a\xc0\xee\xf2\
\x7d\x9d\x47\x12\xea\xf4\x7a\x1e\x89\x79\x94\x85\x8b\x82\xa9\xa9\
\x99\x14\x83\x5f\xf2\x00\x30\xa6\x2e\xe6\xf4\xd9\xcb\x6c\xce\x2b\
\xe7\xc3\xfc\x59\xcc\x7b\x26\x98\x4b\x97\xee\x23\x80\x5b\xe9\xcb\
\x5f\xa0\xfe\x9c\x95\x27\x9f\x7a\x85\xda\x5a\xdf\x36\x73\xb9\xe0\
\xda\xb5\x7f\x09\xc0\xad\x88\x88\x48\xaf\x9b\xb4\xb7\x43\x7e\xbe\
\x42\x4c\x6c\x10\xd6\x2b\x89\x2c\x4d\x4b\x9f\x14\x40\x67\xaf\xbd\
\x47\x3a\x54\xfb\x98\x01\x77\x1d\xfd\xe3\x2e\xae\xaf\x87\x97\x56\
\x4d\x23\xee\x89\x20\x3a\x3a\xd7\x50\x75\xf4\x12\x35\x3f\x98\x98\
\xf7\xac\x61\x42\x53\x87\x6a\xa7\xd7\xde\x23\x81\x4e\x05\x30\xa9\
\xfd\x6a\x82\xb9\xab\x85\x79\x0f\x25\x7b\x04\x07\xe9\x46\xbf\xcc\
\xef\xdc\x81\x83\xa5\x82\x3d\x7b\xa6\x21\x65\x08\xeb\x5f\xdf\x4c\
\xf1\x97\xeb\x08\x0e\x7e\xc0\xdb\x61\x87\x65\xee\x6a\x61\xa8\x56\
\x30\x69\x18\xac\x58\x38\xd9\x36\xf1\x8d\xb6\x58\x60\xe3\x46\x3d\
\x31\xb1\x3a\x4e\xd5\xa5\xb3\xeb\xb3\x23\x5c\xf8\xf9\x0f\x72\xde\
\xdc\x34\x29\xf3\x7f\x78\x99\x34\x40\x85\x40\x74\x17\x37\x16\xc9\
\xd6\x6e\xcb\x98\x0b\x4a\x4a\x14\x96\x3c\x37\x9d\xe9\x0f\xbe\xc7\
\x85\xa6\xdf\x39\x50\x56\x4b\xea\xc2\xf1\xbf\xf1\x13\xa9\xb5\xdb\
\x42\x71\x63\x91\x14\x88\x6e\xa0\xc2\x6b\x61\x72\xf4\xfb\xc3\xa8\
\xaa\xca\xf3\x19\x2b\x09\x08\x08\xf0\xcb\x74\xa4\xb9\x47\x61\xf2\
\xbf\x28\xcd\xdc\xfa\x4f\x8b\xd3\xe1\x81\xfb\x5c\x9e\xff\x0d\xfd\
\x43\xeb\xff\xde\x62\xb8\x33\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x06\x1b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x05\x98\x49\x44\
\x41\x54\x58\x85\xcd\x97\x69\x6c\x54\x55\x14\xc7\x7f\xf7\x2d\xd3\
\x37\x75\x98\xda\x76\x5a\xa6\x2e\x2d\x54\xa1\xe0\x50\x1b\x6a\x23\
\x91\x60\x88\x26\x40\x20\x14\xa3\x86\xba\x7c\x50\x31\x46\x12\x42\
\xc2\x62\xa2\xa8\x91\x02\x89\xc4\x10\xf7\x05\xd4\x20\x04\xac\x42\
\x20\x51\xc3\xa6\x50\x37\x14\x15\x2a\x34\x22\x5b\x41\xda\x82\x2c\
\x1d\xe8\x02\x1d\xca\xcc\x9b\x79\x6f\xe6\xfa\x61\xda\xd2\x3a\xd3\
\x0d\x8c\x7a\x92\xf7\xe5\x6e\xff\xdf\x39\xf7\xbc\x7b\xef\x11\x52\
\x4a\xba\x9a\x58\x2c\xb2\x81\x32\xa0\xb8\xfd\xf3\x72\x6d\xe6\x07\
\xaa\xdb\xbf\x0d\xb2\x5c\x9e\xef\xa6\xd7\x15\x40\x2c\x11\xd3\x85\
\x14\xcb\x25\xd2\x03\xa0\xa9\x9a\xcc\x30\x32\xc5\xb5\xa8\xb7\x98\
\xcd\xd2\x8e\xda\x02\x40\x20\x9a\xa4\x90\xb3\xe4\x42\xb9\x31\x01\
\x40\x2c\x16\x1f\x00\x4f\x3b\x54\x87\x9c\x3b\x66\x81\x98\x90\x3f\
\x85\xc2\xac\x22\x52\x34\xe3\x5a\xf4\x09\xdb\x26\x07\x1a\xf7\x53\
\x59\xb7\x8d\x37\xf7\xbc\x22\x23\xd1\x88\x00\x3e\x94\xe5\x72\x66\
\x27\x80\x58\x22\xa6\x23\xd9\x30\x3c\x73\x24\x6b\xa6\x6d\x64\x84\
\xc7\xd7\xe7\xc2\xa2\xa9\x11\x00\xe9\xc9\xea\x37\x4c\x4d\xd3\x21\
\x1e\xdf\x34\x9d\x63\xcd\x47\x40\x50\x26\x17\xca\x8d\x82\x45\x64\
\x0b\xc4\x21\x5d\xd5\x33\x77\x3e\x56\x2d\xfa\x23\xee\x9c\x3f\x1b\
\x7d\xdb\x66\x00\x22\x93\x4b\x31\xdf\x78\x77\x40\x10\xe3\xd7\x16\
\x4b\x2b\x6a\x35\x4b\xa4\x4f\x03\xca\x24\xd2\x33\x77\xcc\x82\x04\
\xcf\x8d\x97\x17\x25\x2c\x20\x82\x41\x1c\xeb\x2a\x10\x81\x56\x00\
\x52\xd6\x57\x10\xc2\xc0\xe9\x71\xf5\x28\x6a\xbe\x78\x65\x9d\x11\
\x1e\x1f\x73\xc7\x2c\x10\xcb\x7e\x5e\xe2\x01\xca\x14\xe2\x99\xce\
\x84\xfc\x29\xfd\xf6\xe2\xef\xa6\x3b\x63\x9c\x89\x9d\xc5\x94\x66\
\xbf\xc6\x77\xd1\x2a\xd6\x80\x62\x4d\xd5\x64\x61\x56\x51\x42\xb6\
\x77\x25\xef\x66\xa1\x60\xe7\x16\x58\x53\x4a\xb1\x97\xbe\x4e\x6b\
\xf8\x0c\xe1\x58\x06\xe9\x38\x71\x3a\xaf\x0c\xd5\xea\x76\x25\x4c\
\x2f\xcc\x2a\x42\x53\x35\x69\x47\xed\x62\xc1\x22\xfc\xd9\xd7\x0d\
\x1e\x7c\x74\x96\x3f\x61\xa0\x56\xb7\x0b\x3b\x7f\x5c\x52\x86\x64\
\x49\x58\xf5\x8b\x4d\xde\xc8\x28\x8e\x68\x0a\xd9\xfe\x2d\x18\xdb\
\x17\x03\xd0\x36\xeb\x6b\xa4\x91\xd6\x6d\x7e\xc1\x72\x2f\xe7\x2f\
\x9f\x3b\xa7\x25\x77\x11\x84\xd9\x8a\xf3\x8b\x79\x80\xc4\x9c\x54\
\x8e\xe5\x2b\xed\xd6\x9f\x2c\xfb\xbd\xf9\x61\xc2\xc7\xd7\x93\xf7\
\xdd\x3b\xe8\xaa\xde\x3e\x6f\x6a\x4f\x12\x71\x27\x7b\xea\x90\x46\
\x1a\x97\xe6\xff\x8a\x7e\x28\xee\x89\xb1\x7d\x31\xe6\xc4\x85\x58\
\xa3\xa6\xc5\x01\xdb\xda\x23\xe0\x8a\x83\xe8\x07\x37\x31\xfc\xcb\
\x45\x44\x54\x93\x9a\x7b\x67\xa2\x0f\x7f\x0c\x55\xb3\x69\xb3\x6b\
\xb9\x55\xbb\x65\xe0\x00\x1d\x66\xf9\xa6\x62\xf9\xa6\x76\x03\x91\
\x6e\x2f\x4a\xc3\x41\x00\x62\x39\xa3\x10\xad\x0d\xa0\xa8\x9c\x9d\
\xfc\x0c\x47\x8a\x6e\xc2\x08\x5d\x47\x38\xa5\x1a\x4d\x28\x78\x19\
\x0a\xb1\xab\x88\x40\x4f\x20\x8e\x7d\x9f\xe0\x5c\xf7\x24\x22\x1a\
\x89\x47\x22\xe0\x27\xf4\xc8\x2a\x2e\x8c\x7e\x90\xd3\xda\x31\x1c\
\xb2\x09\x4c\x48\x31\xb3\x49\x4f\x75\xe3\x55\xf3\xa0\x97\xc3\x5c\
\x49\xd6\xf8\x6d\x25\x5c\x0a\xf4\x00\x52\x30\x11\xe9\xbe\x72\x3f\
\x49\xb7\x17\xab\x60\x22\x51\x35\x8c\x88\x0a\x40\x41\x4d\xbb\x84\
\x2b\xd5\xc5\xcd\x5a\x1e\xa2\x8f\x9b\x24\x01\xa0\xe6\x08\xac\x5e\
\x09\x6f\x2c\x83\xef\xbe\x49\x9c\x20\x5d\x59\x58\xbe\x52\x62\xe9\
\xb9\xc4\xd2\x73\xb1\x7c\xa5\x48\x57\x16\x6e\x99\xc6\x08\x51\x88\
\x66\x65\x63\x29\x2a\x86\xea\xe8\x5d\xb9\xdd\xba\x6d\x41\x20\x00\
\x0f\x4c\x03\x7f\x03\x14\x14\x80\xaa\x42\xd5\x2f\x30\xef\x59\x70\
\x74\x59\x2f\xf4\xe0\xbb\x98\x93\xca\x01\xb8\x68\x68\x58\xa1\x0b\
\xa8\xa1\x34\xd2\x33\x14\x64\x2c\x4c\xcc\x1c\x44\x86\xe2\x05\xb5\
\x6f\x80\xce\x08\xc4\x62\x50\x76\x7f\x90\x96\x66\x90\x12\x6a\x6a\
\x60\xc5\x0a\x38\x7d\x0a\x5e\x2e\x87\xea\xbd\x89\x91\x90\xae\x2c\
\x5a\x38\x4f\x73\x6a\x3d\x47\xc5\x3e\xfe\x3c\x19\x22\x1c\xd1\x18\
\xe6\x18\x8a\xa1\xf6\x43\xbd\x6b\x04\xb6\x6e\x0e\x50\xb5\xe7\x7b\
\xc0\x07\xc4\x7f\x9b\xcb\x97\xa1\xa2\x02\x4a\x4a\xc0\x30\xe0\xa7\
\x5d\x30\x7b\x0e\x9d\xfb\xda\x18\x6a\xe1\x62\xea\x29\x0c\x04\x2e\
\x8f\x42\x8e\xcb\x20\x57\x2f\x04\x99\x28\xd4\x67\x04\x4a\xef\x73\
\xf3\xf9\xe6\x7b\x31\x8c\x93\xa8\xea\x8f\xc0\x95\x73\x7d\xef\x5e\
\x58\xb9\x12\x4e\x9d\x80\x85\xcf\x43\xed\x1f\xf1\xf6\x53\x5a\x2d\
\x86\x04\x45\x51\xc8\x8b\x14\xa1\xeb\x03\x7f\xbb\x74\x4b\xc2\xf1\
\xf7\xb8\x38\x71\xf6\x1e\xee\x1a\x9b\x89\x10\x95\xc0\xe9\xce\xbe\
\x96\x16\xf8\xe8\x23\x38\xf0\x3b\x6c\xf8\x14\xde\x5e\xd5\xc4\xa0\
\x94\x8b\xc4\x14\x05\x3d\x92\xcf\x20\x35\x63\xc0\xe2\x09\x00\x00\
\x0e\x87\x60\xf3\x57\xb7\xb1\xfc\xc3\x71\x68\xda\x41\x54\xb5\x0a\
\xb0\x80\x78\x9e\xec\xdc\x09\x1f\x7f\x0c\x8d\xc7\x3c\xac\x7e\x6e\
\x3c\x67\xfc\x6e\x3c\xa1\x1b\xaf\x4a\x3c\x29\x40\x87\x3d\xfc\x68\
\x3a\x47\xeb\x26\x70\xeb\x30\x1d\x21\x76\x00\x8d\x9d\x7d\x0d\x0d\
\xf0\xfe\x0a\x38\x59\xeb\xe0\xd7\xf7\x4a\xd8\xbe\xa5\x7f\x09\x37\
\x20\x00\x80\x8c\x4c\x95\xdd\xfb\x46\xf3\xc2\x4b\x77\x20\x94\x3d\
\x28\xea\x6f\x74\x9c\xab\xb6\x0d\xdb\xb6\xc2\x96\x2d\x30\x38\xcb\
\xc2\xa1\xd8\xff\x3c\x40\x87\x3d\x34\xc3\xcd\xa7\xbb\x6f\x26\xdd\
\x13\x40\x51\x2a\x81\x8b\x00\xe8\x3a\xdc\x39\xc6\xe2\xee\x71\x41\
\x9c\x29\x2a\xaa\xe8\xe5\xd0\xef\x05\xc0\xdf\x62\x36\xcb\xb0\x9d\
\xfc\x35\x13\xb1\x62\x34\x2a\xe7\xf0\xdc\xa0\xb1\xe1\xa4\x64\xf2\
\x13\x6e\x84\xf8\x01\x21\x8e\x32\x34\x3f\xca\x6b\xaf\x5e\x22\x16\
\x8b\x12\x0c\x06\x91\xd6\x65\xa2\xd1\x68\x9f\xa2\x61\xdb\xa4\xc5\
\x6c\x96\x80\x5f\x01\xaa\xed\xa8\x2d\x0e\x34\xee\x4f\x3a\xd8\xa1\
\x2b\xe4\xb8\xdd\x5c\xaf\x65\x22\xad\x54\xe6\x2c\xb7\x59\xb3\xe3\
\x76\xdc\x69\x7e\xde\x7a\x6b\x37\x75\x75\xc7\x69\x6d\x6d\xa5\xbe\
\xbe\x9e\xbc\xbc\x5c\xe6\xcf\x7e\xaa\x4f\x80\x03\x8d\xfb\x69\xaf\
\x15\xaa\x15\xe2\x15\x0b\x95\x75\xdb\x7a\x9c\x90\xe3\xc8\xc4\xeb\
\xcc\x64\x48\x73\x09\x79\x6d\x25\x94\x8e\x1d\xc2\x89\x33\xe3\xc9\
\x1d\x92\x43\x20\xd0\xc6\x9e\xaa\x2a\x66\xcc\x98\x41\x20\x10\xa0\
\xa2\x62\x2d\xf5\xb5\xc7\x7a\x05\xe8\xa2\x55\xdd\xef\x67\xf9\x85\
\xa0\x45\x7a\xaa\xde\xe3\xa2\x87\x0f\xee\x67\xeb\xa6\xcf\xa8\xa9\
\x39\xcc\xd2\x65\x6f\x32\xd8\x9b\xfc\xd7\xfc\xfb\xb3\xfc\xaa\x0b\
\x93\xab\xb1\xa4\x85\xc9\xff\xa2\x34\xeb\xb0\xff\xb4\x38\xed\x6c\
\xf8\x97\xcb\xf3\xbf\x00\x85\xf4\x78\x5b\x3f\x48\xd0\xc1\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\xcb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x06\x48\x49\x44\
\x41\x54\x58\x85\xc5\x97\x7f\x50\x93\xe7\x1d\xc0\x3f\x4f\xf2\x06\
\x02\x54\xa7\x24\x50\xc4\xb5\x73\x38\x50\xc4\xe0\x95\x73\xb5\x4c\
\x11\x75\x5a\x57\x75\xae\xea\x40\xac\x13\xd6\xae\xd7\xed\xac\xb0\
\xfe\x3a\x67\xb7\x0e\xa8\xab\xb7\x5e\xef\x7a\xf3\x8f\xad\x3b\x68\
\x6f\xc5\xd2\x0e\x24\xe2\x3a\xec\x36\xab\xb6\x82\x80\x62\x7b\xc2\
\x20\x09\x0a\x56\x5a\xd4\x2a\x02\x09\x88\x20\xf9\xf1\x26\xcf\xfe\
\x10\xa3\x19\x4a\x95\xf4\xb6\xef\xdd\x73\x97\xef\x9b\x6f\xde\xcf\
\xe7\x79\xf3\x3c\xc9\xf3\x15\x52\x4a\x6e\x0e\xf1\xb2\x88\x06\x32\
\x81\x94\x91\x11\x43\x70\xd1\x05\x34\x8e\x8c\x0a\x59\x20\xbb\x03\
\x78\x37\x0b\x88\xed\x22\x43\x48\xf1\x86\x44\x1a\x01\x14\xad\x22\
\x23\xf5\x06\x11\x0c\xdd\xe1\xb4\x4b\xd5\xab\x0a\x00\x81\xe8\x95\
\x42\x6e\x96\xf9\xd2\x3c\x4a\x40\xbc\x2c\x8a\x80\xa7\x42\xb4\x21\
\xf2\x99\x79\xdb\xc4\xb2\xb8\x15\x98\xa2\xe6\x10\xaa\xe8\x83\xe1\
\xe3\x52\x9d\x58\x7a\x9a\x39\xd8\xf1\x4f\x76\x1e\x7f\x55\xba\xbd\
\x6e\x01\x14\xcb\x02\xf9\x73\xbf\x80\xd8\x2e\x32\x90\x54\x24\x18\
\x12\xd9\xb5\xda\xcc\x4c\x63\x52\x50\xd0\xdb\xc5\xa9\x5e\x1b\x39\
\x55\x19\xb4\xdb\x4f\x82\x20\x53\xe6\x4b\xb3\xa0\x90\x68\x81\xb0\
\xe9\xb4\x3a\x43\x4d\x76\xa3\xb8\x5b\xb8\xdb\x05\x21\xa1\x37\xf2\
\x8f\xbf\x38\x40\xcb\xa5\x26\xce\xf4\xb5\x03\x10\x37\xe9\x3b\xcc\
\x30\xce\x62\xf9\xf4\x55\x68\x85\x96\x53\xbd\x36\xd2\xdf\x49\x91\
\x1e\xaf\xc7\x2e\x91\x49\x5a\x16\xf1\x38\xf0\xe3\xe7\x53\x7f\x23\
\xd6\xcc\xcc\xbc\xeb\x59\x69\x15\xf8\xe8\xd0\x00\x97\xc3\x2d\x6c\
\x7a\x7f\x0d\xfb\xcf\x7c\xc0\xf4\xc9\xf1\x5c\xf5\x0c\x51\x66\xdb\
\xc5\xe1\xce\x83\xec\x3d\x55\x4e\x55\x7b\x25\xf1\x91\x33\x98\x1b\
\x3b\x0f\xd5\xa7\x8a\xfa\x73\x35\xe1\xc0\x59\x85\x6b\x2b\x9d\x65\
\x71\x2b\xee\x1a\x7e\x3d\x74\x26\x2b\xcb\x77\x2f\x46\xa8\x82\xcf\
\x73\xed\x84\xeb\x22\x00\x48\x9f\xb6\x94\x27\xaa\xd6\x03\xd7\x1e\
\xff\xfa\xca\x95\x1c\xfc\x49\x03\xcb\xe2\x56\xf0\xda\xd1\xed\x00\
\x29\x1a\x20\x45\xd1\x2a\xd2\x14\x35\x67\xdc\x02\x7f\xac\xdb\x81\
\xc7\xe5\xc6\xed\x75\x51\xdd\xda\xec\xbf\xbe\x66\x46\x26\x09\x86\
\x44\x7f\xee\xf2\xba\xc8\xdb\xff\x24\xa6\xa8\x39\x28\x5a\x45\x5e\
\x17\x88\x89\xd4\x1b\xc4\x78\x57\xbb\x4f\xfa\x38\x76\xbe\xd6\x9f\
\x97\xb5\x95\xd2\x74\xfc\xc6\xd6\x4e\x8e\x7e\x20\xa0\xbe\xa5\xbb\
\x09\xd5\xa7\x32\xb2\xbd\x63\x34\xe3\xa2\xde\x14\x1a\xa1\x21\x26\
\x62\x8a\x3f\xff\x74\xa8\x86\x73\x91\x9f\xf8\x73\x63\x78\xd4\x28\
\xe1\xae\xa1\x8b\xfe\x5c\x09\x56\x00\xe0\x85\xd4\x97\xd8\xfa\x51\
\x2e\x6e\xaf\x4a\xfc\x77\xa7\xb2\x4f\x5b\x8a\x3c\xad\xa0\xbf\x67\
\x88\xfa\xfe\xba\x80\x5a\x9d\x46\xc7\x37\x27\xde\xff\xf5\x0a\xac\
\x4f\xda\xc4\xea\x84\x75\x7c\xe8\xaa\xe6\xef\x51\x7f\xc0\x78\xc1\
\x4d\x55\x54\x2e\x13\x34\x0a\x83\x87\xec\x01\xb5\x59\x49\xd9\x84\
\x6a\x6f\xec\xdb\xa0\xbf\x02\x00\xaf\xd7\x4b\x43\xfb\x09\xfe\xf6\
\x71\x31\x8e\x92\x4e\x64\x17\x18\x5b\x16\xf3\xed\x8e\x95\x7c\xd1\
\xd5\xe9\xaf\x9b\x3e\x39\x9e\x82\xf4\x57\x03\x3e\x7b\xd7\x4f\xa0\
\xbb\xfb\x12\x36\x6b\x0b\xad\x56\x0b\xad\xad\x16\x6c\x96\x16\x3e\
\x3b\xdd\xc6\xbd\x53\x63\x50\xf1\x10\xfe\x90\x24\x65\x63\x1b\x53\
\xfb\x52\xb1\x1d\x6b\x40\x72\x6d\x41\x26\x1a\x67\x53\xb6\xb6\x0a\
\x43\x98\xf1\xce\x05\xac\x96\x66\xfe\xdd\x74\x82\x56\x9b\x85\x56\
\x6b\x0b\x36\xab\x05\x21\x04\xa6\xe4\x64\x4c\x26\x13\x4b\x16\xa5\
\x93\x97\xbb\x85\x59\xb3\x92\xd0\xeb\xf5\xe4\xe5\x3e\x8d\x35\xb6\
\x83\x7e\x9d\x8f\x19\x9a\x10\xca\x6d\xa5\x00\x6c\x98\x9d\xc3\xeb\
\x4b\xdf\x20\x4c\x17\x3e\x8a\x31\xa6\x80\xc3\x61\xe7\x57\xcf\xe7\
\xb2\x72\xd5\x0f\xd9\xf6\xe2\x8b\x98\x4c\xc9\x44\x47\x47\x07\xd4\
\x5c\x51\x2f\x33\xec\x75\xe1\xea\x0f\xc5\x62\xb1\x60\x58\x64\xc4\
\x73\xe9\x3e\x3a\xac\xe7\x70\xa9\x4e\x0a\x16\xfe\x9e\x67\xe6\x6d\
\xbb\x2d\x63\xcc\x35\xb0\x30\x7d\x09\xbb\xf7\xfe\x83\x9a\xea\x6a\
\xf4\x7a\xfd\x28\x38\x40\xbf\xe8\xc5\x11\x7a\x96\x33\xa2\x85\x96\
\x66\x0b\xc4\xde\x4f\xde\xa4\x9f\xb2\xaf\xed\x5d\x76\x2e\x2f\x1e\
\x13\xfe\x95\x02\x00\x0b\xd2\x16\x51\xf2\xae\x99\xac\xcc\x0c\xea\
\xea\x6a\x03\xde\x73\x38\xfb\x19\x50\x2e\x22\xc5\x00\x43\xc3\x9d\
\x84\x84\xea\xd8\x95\xb2\x93\x04\xdf\x03\x38\x86\xed\x6c\x98\x9d\
\xf3\x55\xb7\x0f\x6e\x17\x5c\x50\x3a\x09\x95\x20\x34\x1a\xfa\x9a\
\x3c\x98\x4c\xc9\x00\x78\x7c\x1e\x12\x8d\x49\xe8\x34\xba\xe0\x05\
\x8e\xd5\xd7\xf2\xf8\xa6\x4c\xca\x2b\xcc\x2c\x58\x90\x06\xc0\x95\
\x2b\x3e\x3e\x17\x36\x22\x42\x06\xf0\x69\x34\xe8\xdc\xf7\x71\xda\
\xd6\x41\x62\x92\x09\x00\x45\xa3\x50\xfa\xe8\xde\x3b\x9a\xc4\x98\
\x02\x47\xeb\x8e\xf8\xe1\xf3\xe7\x2f\x00\xc0\xa9\x7a\x19\x88\x38\
\x85\x56\x73\x19\x9f\x50\x18\x16\x3a\x22\x9d\x31\x58\xad\x16\x66\
\x8d\x08\x08\x04\x13\x42\x26\x06\x27\x50\x5f\x5b\x13\x00\xf7\xaa\
\xd0\x23\xcf\x72\x4e\x7b\x02\xb7\xd2\xcf\xc0\xb0\x8f\xb6\x4f\xec\
\x4c\x73\xcf\x44\xab\x15\x58\xad\x56\xbf\x40\xc3\xf9\x3a\x5e\xa9\
\x7d\x89\x93\xbd\xd6\xf1\x09\xd4\xd5\x56\xf3\x44\xf6\x7a\x76\x9b\
\xf7\xf8\x67\xde\xe5\x39\xcb\x55\xcd\x79\x34\x42\xe2\x1a\xd0\xf2\
\x8b\x35\xcf\xb2\x71\x49\x16\x8d\x47\x3e\x25\x44\x07\xa7\xdb\xdb\
\x48\x4c\x4c\x62\x47\xdd\x6f\x79\xa4\x2c\x8d\xd7\x1b\x76\xf0\xbd\
\xb7\x4d\xbc\xdf\x66\xbe\x15\xe2\xf6\x02\xb5\x47\x0e\xf3\xb3\xec\
\xac\x00\xf8\x45\xbb\x93\x41\x71\x05\x9f\x2b\x0c\x8f\xcb\xcd\x53\
\x99\x79\xb8\x23\x9c\x94\xed\xd9\xc7\xc6\xc7\xb2\x78\xb3\xb8\x88\
\x29\xb1\x53\xd1\xe9\x43\x28\x69\x2e\x0a\xb8\xdf\x9f\x4f\xec\x1c\
\x53\x40\x01\xba\x1c\x4e\x7b\xb4\x4b\x75\x8a\xe3\xf5\x47\x79\x32\
\x67\x03\x15\x7b\x2a\xfd\x70\x8f\xea\xc3\x2e\xba\x91\x2e\x1d\x43\
\x61\x0e\xf2\xd6\x3e\x4b\xdf\xc4\x4b\xe4\x17\x54\x90\x16\x97\x4a\
\x49\xa9\x99\xcc\xb5\x2b\xf8\xfe\xb2\x1f\xa0\x68\x14\x26\xeb\x0d\
\xf4\x5e\xed\xf1\x03\xee\x8d\x18\xdd\x56\xb8\x54\x27\x0e\xa7\x5d\
\x02\x5d\x1a\xa0\x51\xf5\xaa\xc2\xd2\xd3\xcc\x85\x2f\xcf\xa3\xd5\
\x6a\x31\x1a\x6e\xfc\x5e\xeb\x14\x0d\xd1\xf7\x4c\x20\xd4\x13\x4e\
\xee\x8f\x5e\xa0\x7b\x42\x37\xcf\x15\xbd\xc6\xaa\xb8\x54\x00\xe6\
\xa7\xa5\x53\x5e\xf9\x01\x73\x1f\x7c\x08\x80\x5f\x3e\xb8\xd5\xff\
\x6f\xf7\x8d\xd0\x49\x3c\x3d\xf7\xb9\x51\x02\x96\x9e\x66\x46\x7a\
\x85\x46\x28\x64\x0b\x85\xc8\xad\x07\xf2\x65\xdf\x90\x94\x7f\x2a\
\x7a\x5b\x4e\x99\x12\x2b\x2d\xd6\x93\xd2\xad\x4a\xe9\x56\xa5\x1c\
\x18\x1c\x96\x8b\x97\x3e\x2c\x17\x2e\x5c\x27\x2b\x5b\x3b\x65\xdf\
\x90\x1c\x35\x1c\x83\x3e\xff\xeb\xcf\x7a\x7b\x64\x49\xa3\x59\x7e\
\xd9\x3f\x78\xcb\xda\xad\x07\xf2\x25\x85\x48\x0a\xd9\x72\xcb\x63\
\x79\xf9\x5f\xdf\xe1\x77\x05\xbf\xe6\xc3\x03\x87\xf8\xd6\xb4\x69\
\xac\x5b\xfb\x28\x1e\x11\x86\xb9\xc2\x8c\xa2\x04\x77\x84\xf8\xef\
\x63\xb9\x22\x0b\x64\xb7\xd8\x2e\x36\xbb\xbd\xee\x8a\x9c\xaa\x0c\
\x76\xad\x36\x93\xf5\x58\x36\x42\x08\x96\x3f\xbc\x94\xf8\x84\x04\
\x26\x4d\x36\xf0\x56\x49\xd9\xd7\x02\xcf\xa9\xca\xc0\xed\x75\x0b\
\x04\x9b\x65\xbe\xec\x1e\xb3\x35\x6b\x3f\x7c\x92\x43\xfb\xff\x45\
\xf1\x5f\xde\x1b\x37\xfc\x8e\x5a\xb3\xeb\xf1\x7f\x6d\x4e\xfd\x17\
\xfe\xc7\xed\xf9\x7f\x00\xad\x6d\x0d\xd8\x45\xb9\xf8\x1b\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\x31\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x05\xae\x49\x44\
\x41\x54\x58\x85\xcd\x97\x7b\x50\x94\x55\x18\xc6\x7f\x67\x77\x3f\
\x96\x05\xf6\x02\x84\x22\x72\x2d\x45\xc5\xeb\xa2\x28\x13\x98\xc8\
\x68\x66\x25\xea\xa8\xd3\x6d\x34\x2b\x9b\xd1\x26\x75\x2c\x4b\xcd\
\x66\xc0\xc6\xb4\xa6\x71\xba\x68\x39\x32\x65\x31\xe5\x68\x62\x4d\
\x65\x96\x96\x83\xe4\x35\x20\xd7\x1c\x14\x13\x64\x5d\x01\x11\x01\
\x85\xc5\x0b\xec\x8d\xd3\x1f\xc4\x26\x22\xb1\x8c\x96\x3d\x7f\x7d\
\xdf\x79\xcf\xf7\x3c\xcf\x77\xce\x7b\xce\xbc\xaf\x90\x52\x72\x23\
\xc4\x2a\x31\x04\x78\x0a\x30\x23\x48\x44\x12\xc6\xed\x40\x50\x87\
\xc4\x02\x1c\x03\xb6\xc8\x4c\x79\xa2\x43\xb8\xdd\x80\xc8\x15\x6a\
\x4a\x58\x26\x84\xc8\x92\x52\x2a\x00\x26\x6d\x30\x91\xc6\xe8\xdb\
\xd2\xaf\xb2\x57\xd0\xe8\x68\x68\xd3\x10\xc2\x25\xa5\xcc\x22\x81\
\xb7\xe5\x2c\xe9\xf1\x1a\xf8\x4b\x7c\x2f\x90\x66\xd0\x1a\x65\xe6\
\x03\x6b\x45\x7a\xec\x24\x62\x4d\xf7\xde\x96\x78\x3b\x6c\x8d\x56\
\xf2\x6c\x7b\x58\xb5\x7f\x85\x6c\x72\xd8\x05\x90\x4f\x02\x13\xe4\
\x2c\xe9\x69\x33\xb0\x4a\xbc\x06\xbc\x39\xa6\x6f\x0a\x9f\x66\x7c\
\x49\x9f\xa0\xbe\x77\x44\xf8\x66\x5c\xb8\x7a\x9e\x67\xbe\x7b\x8c\
\x82\xf3\x87\x00\x56\xca\x4c\xb9\x46\x90\xc5\x10\x21\x84\x45\xef\
\x67\xd0\x14\x3c\x5b\x22\xc2\x83\x22\x7a\x44\xea\x74\x80\x9f\xb6\
\x67\x26\x92\x37\x0f\x96\x57\x9c\x4d\x6e\x29\x65\xa2\x9a\x34\x96\
\x00\xe3\xd6\xa4\xbf\x2b\xc6\x46\xa7\x03\xb0\x25\x77\x05\x87\x0a\
\xb6\x91\x38\xec\x61\xa4\xf4\xb0\x71\xf3\x3c\x02\x74\x46\x7a\x85\
\xc5\x75\x22\x54\x6b\xe0\xc3\x8f\xea\x69\x6c\x14\xdc\x77\x9f\xd2\
\xad\x01\xbd\x9f\x01\xa3\xd6\x28\xf6\x94\xef\x52\x03\x4d\x2a\xc0\
\x0c\x90\x1e\x3b\x09\x00\xb7\xdb\xc9\xce\xdd\xeb\xc8\xdb\xff\x09\
\x27\x4e\xe5\x51\x59\x5d\x42\xfe\xc1\xcf\x10\x42\x74\x49\x3a\x69\
\xae\xc2\x57\xa9\x0b\xc8\x3e\x78\xd8\xa7\x55\x68\xd7\x02\xcc\x2a\
\x04\x89\x26\x6d\xb0\x37\xe1\x2a\xaa\x8a\xf1\x78\x5c\x44\x47\x0e\
\xe1\x48\x51\x2e\x67\x6d\x16\x00\x62\xa3\x47\x74\x49\xd8\x2f\xc0\
\xc8\xa8\xab\xd3\x08\x8f\x8b\xa5\xfa\xbc\xec\x72\x5e\x3b\x62\x4d\
\xf7\x62\xd2\x06\x83\x20\x51\x85\x24\xec\xc6\xa3\x66\x3d\x67\x41\
\xa5\x52\x33\x6b\x5a\x16\x85\x96\x6f\x38\x73\xb6\x88\x5e\x61\x71\
\x04\x06\x98\xfe\x91\xf4\xf9\xb0\x69\xec\x58\x62\xe0\x64\x5d\x2d\
\xc7\x0a\xba\x37\x11\x69\x8c\x06\x49\x98\xea\xe6\x80\xd5\x76\x94\
\x88\xf0\x01\x24\x99\x33\x68\xf5\xb8\xd9\x7f\xf8\x73\xe2\xa2\xcd\
\xdd\x12\x02\x4c\x5d\x5e\xc3\x8f\xa2\x80\xca\xd8\x43\x3e\xcd\x07\
\xd0\xdc\x3c\x70\xd6\x66\x21\x2e\xc6\x8c\x5a\xad\x90\x94\x38\x95\
\xfc\x83\x39\xc4\xc5\xb4\x19\xb8\x74\xb9\x8a\xd6\x56\x77\x87\xf9\
\xf7\x84\xc6\x78\xf3\xc3\x64\x32\xd0\x12\xb1\x8b\x9d\xd5\x0a\xcd\
\x35\x12\x93\x94\x94\xeb\x4f\xf2\x42\xd0\x02\xdf\x0d\xcc\x7d\xea\
\x3d\x82\x8d\x7d\x00\x78\x72\xe6\x1a\xd2\x52\xe7\x12\x19\x31\x08\
\x8f\xc7\xcd\xc2\x65\xfd\x71\xb9\x5a\xbc\x73\x83\x02\x43\xd8\xbc\
\xa1\xde\xfb\xee\xec\x7b\x82\x66\x93\x95\xf0\xe6\x38\x7e\x0e\x59\
\x46\x90\xa2\x61\xa2\x9c\x07\xae\x1e\xac\xc0\x80\x7e\xf7\x7b\x9f\
\x83\x4d\x11\x04\x9b\xfe\xbe\x17\x72\x3e\x6a\x40\x4a\xc9\x27\x5f\
\x2c\xe4\x70\xe1\x97\xbc\xba\xf8\x1b\xef\xdf\x5b\x9d\xd5\x1c\x0f\
\xda\x43\x70\x6b\x33\xad\x75\x0e\xf4\xa1\x0a\xe6\xba\x79\x3c\x62\
\x9c\xdd\xb5\x3a\xd0\x29\x07\xfe\x09\x1a\x8d\x96\x9c\xad\x2f\x71\
\xa4\x70\x3b\x2b\x5f\xde\xcd\xa0\xf8\xb1\xde\x98\xc3\xbf\x01\xd1\
\xa2\x86\x56\x85\x80\x04\x1b\xce\x8a\x31\xcc\xe9\x46\xbc\x47\x06\
\xa4\x94\x64\xe7\xcc\xe7\xc0\x91\x2d\xac\x5c\xba\x9b\x81\xfd\x53\
\x3a\xc4\x07\xb5\x0e\x66\x99\x7a\x0d\x81\xf6\x74\x1a\x15\x7f\x12\
\x02\x7c\x4b\xdc\x4e\x5b\xd0\x15\xb2\x73\xe6\xb3\x37\x3f\x9b\xb4\
\xd4\xa7\xa9\xa8\x2c\xa6\xa2\xb2\x98\xe4\xa4\x99\x9c\xc3\x86\xfd\
\x8a\x40\x57\x35\x8c\xc4\x24\x0d\xd2\x5d\x8f\xeb\xe2\x40\x66\xe8\
\xa6\xdf\x39\x03\x2e\x57\x0b\x17\x6a\x4a\x49\x18\x38\x8e\xda\x7a\
\x1b\xb5\xf5\x36\x00\x52\x92\x1f\xa7\x48\xee\xa3\x3e\xc2\x8a\xc5\
\x76\x95\xd1\xef\x4f\xa4\x32\xfc\x14\x53\x62\xc7\xa3\x1f\xd2\x0c\
\xf8\xdf\x19\x03\x8a\xe2\x4f\xd6\xf2\x7d\x9d\xc6\x0f\x9c\x2f\xe4\
\x78\x54\x2e\x41\xe5\xd7\x11\x3b\xca\xf8\xad\x69\x07\xc3\xc5\x83\
\x1c\x29\xde\x8a\xdf\x65\x85\x8c\xc9\x4b\xef\x8c\x81\x5b\x41\x4a\
\xd8\xa6\xdf\x44\x9f\x66\x37\xa7\x3e\x28\xa5\xb7\xa1\x1f\xab\xdf\
\x38\x48\x60\x60\x30\x00\x0e\xe7\x75\x9f\x78\x7a\x74\x0a\xda\x71\
\xf2\x84\x83\xf5\xca\x12\xe2\xef\x39\x4e\x43\x59\x0b\xce\x06\x27\
\x19\x13\x96\x22\x54\x2a\x32\xdf\x4a\x23\xf3\xad\x34\xaa\x2f\x9c\
\xfe\x77\x0c\x54\xd9\xaf\xb1\x37\x66\x21\xfe\xca\x51\x9c\x1a\x03\
\xb5\x1a\x3d\x00\x5a\x6d\x20\x6a\x95\x86\x81\xfd\x53\x28\xf9\xe3\
\x17\x42\x43\x22\x7d\xe2\xeb\xd1\x16\xe4\xb6\xe4\x50\x1a\xb4\x15\
\xad\x4b\x87\xdd\xe9\xcf\xb5\xeb\xc3\x59\x14\x35\x87\x6c\xd3\x23\
\x7c\xba\x65\x31\xe5\x67\x8b\x28\x2b\x2f\x20\x34\x24\x0a\x83\xde\
\xb7\x5a\xd6\xf7\x63\x68\xfd\x10\x77\xfc\x76\x34\x2e\x1d\x1e\x97\
\x11\xbb\xd6\xc1\x3a\xcf\x3b\xa0\x85\xc1\x6b\x8b\x29\xb2\x7c\xcb\
\x19\x6b\x21\xa3\xcc\x53\x48\x32\x4f\x05\xe0\x54\xe9\x01\xd6\x6f\
\x6a\xbb\x8c\x8c\x86\x5e\x8c\x1e\x39\x9d\xe9\x8f\xae\xe8\xc0\xeb\
\xd3\x16\x6c\xcf\xaf\xc2\x16\xf6\x03\xce\x9a\x28\xfc\x35\x02\x47\
\xef\x8b\x0c\x68\x19\xe9\x8d\x07\xe8\x8c\x8c\x4b\x99\xc3\x73\xb3\
\x37\x90\x31\xf9\x15\xfa\x84\xc7\x03\x50\x56\xfe\x2b\xf5\x97\x2b\
\x58\xba\xe8\x6b\xa2\xa3\x86\xb1\xf5\xab\xd7\x3b\x25\xa7\x0a\x41\
\x5d\x95\xbd\xa2\x4b\xf1\x2b\x4d\x92\x7d\x6c\xe3\x9a\x27\x9c\xea\
\xc8\x5a\xaa\x14\x17\xd7\x6a\x06\x10\x5a\x3a\xa9\xcb\x6f\xda\x61\
\xb5\x59\xd0\x68\xb4\xfc\x94\xb7\x91\xa3\xbf\x7f\xcf\xe4\x09\x2f\
\xa2\xf5\x0b\x00\xda\xca\x75\x04\x75\x2a\x24\x96\x46\x47\x03\xb6\
\x46\xeb\x2d\x49\xf4\x06\x41\xc6\xd0\x54\x7a\x5f\x1a\xcf\x55\x7b\
\x34\x0d\x2a\x3f\xa6\xf3\x1c\x33\xfa\xa5\x76\x6f\xe0\x9c\x85\x98\
\xc8\xa1\x44\x84\xc7\xa3\xd1\x28\xd4\x5d\x3a\x07\xb4\x95\xe9\x8d\
\x8e\x06\x90\x58\x54\xb4\x75\x2c\xe4\xd9\xf6\x74\x49\xf4\x50\x48\
\x32\x33\xc3\x32\x98\xfd\xeb\x26\x66\x97\x7e\x4c\xb2\x61\x68\xb7\
\xe2\xcd\x2d\x57\xa8\xb9\x58\xc6\x48\xf3\x14\x52\xc6\x3c\x8e\x9f\
\xa2\x23\x40\x67\xe4\x26\xad\x63\x3e\x97\xe5\x3b\x7e\xaf\x60\xe6\
\x08\xdf\xbb\xa4\x92\xd3\xfb\xc9\x5c\x3b\x0e\x68\x4b\xc0\xa1\x09\
\x13\x78\x62\xc6\x6a\x3c\x3a\xbf\x0e\x65\xf9\xdd\x6f\x4c\xfe\x17\
\xad\x19\xdc\xe5\xe6\xf4\x46\xfc\xd7\xed\xf9\x9f\x29\xa9\x72\x1d\
\x98\x74\x0e\xd9\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x04\xf6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x04\x73\x49\x44\
\x41\x54\x58\x85\xcd\x97\x5d\x6c\x54\x55\x10\xc7\x7f\xe7\x7e\x75\
\xdb\x5d\x08\xb4\xdd\xd2\x1a\x11\x6c\x22\x02\x4d\x05\x1b\x42\x8d\
\x86\x40\x34\xf8\x40\xa2\x89\x49\xbb\x20\x42\x1b\x7c\xd0\x84\xa7\
\x26\xbe\xf4\x89\x4a\x63\x82\xf1\x09\x1f\x34\xa1\xc1\x5a\x1b\x30\
\xd0\x0a\x42\xd5\x26\x48\x4c\x20\x10\x09\x21\x96\x34\x05\x41\x42\
\x4b\xd4\xd0\x96\xb6\xfb\xc1\x87\xed\xf6\x7e\x1d\x1f\x76\xdd\xee\
\xdd\x96\xee\x96\x36\xea\x24\xf7\x61\xe6\xce\xcc\xff\x7f\xee\x99\
\x3b\x67\x8e\x90\x52\x92\x2e\x62\xbf\x28\x01\x42\x40\x55\xf2\x29\
\x65\x7e\x32\x0c\xf4\x24\x9f\x0e\xd9\x24\x47\x3c\x78\xe9\x04\x44\
\xb3\xa8\x15\x52\x7c\x2e\x91\xc5\x00\x9a\xaa\xc9\x42\x5f\x91\x98\
\x0f\x7a\x24\x1e\x96\xb6\x63\x0b\x00\x81\x18\x93\x42\xee\x95\xfb\
\x64\xe7\x34\x02\x62\xbf\x38\x04\xbc\x67\xa8\x86\x6c\xa8\x6e\x14\
\x5b\xcb\xb7\x51\x19\x5c\x47\x9e\xe6\x9b\x0f\x3e\x93\x76\x9c\xbe\
\xd1\x5e\xce\x0e\x74\x73\xf0\xf2\xc7\xd2\x74\x4c\x01\xb4\xc8\x26\
\xf9\x7e\x8a\x80\x68\x16\xb5\x48\x3a\x56\x15\xad\xe1\xab\x37\x3b\
\x59\x5d\x5c\x31\x2f\xd0\xc7\xc9\xcd\xb1\xeb\xd4\x77\xd5\x72\x2b\
\x7c\x03\x04\x21\xb9\x4f\x76\x0a\x3e\xa4\x44\x20\xae\xeb\xaa\x5e\
\x74\xbe\xae\x47\xcc\x15\xdc\x75\x41\x51\xe6\x46\x62\x73\x7b\x95\
\xb4\x1c\x2b\x2c\x91\x15\x0a\x10\x92\xc8\xe2\x86\xea\xc6\x39\x83\
\x43\x02\xfc\xde\x3d\x2b\x67\xff\xd5\xc5\x15\x34\x54\x37\x8a\x64\
\x9d\x85\x14\x12\x95\xce\xd6\xf2\x6d\x39\x27\xd1\xbf\x3f\xcd\xa2\
\x2d\x2f\xa5\x9e\x67\x43\xaf\x70\xed\xc6\x09\xa2\x6e\x2c\xa7\xf8\
\x34\xac\x2a\x0d\xa8\xd2\x54\x4d\x56\x06\xd7\xe5\x5c\xed\x79\x9f\
\x7d\x8a\x7a\xe5\x72\x4a\x57\x81\x55\xad\x3f\x32\xd6\xbc\x0d\x1f\
\x90\x9f\x3f\x7b\x7c\x65\x70\x1d\x9a\xaa\x49\xdb\xb1\xab\x14\xa0\
\xb4\xd0\x57\x24\x72\xad\x76\x65\xa0\x1f\xed\xc2\xb9\x69\xf6\xe0\
\xb1\x13\x0c\x5e\x71\x79\x30\x39\x49\x34\x9c\x65\x01\x9a\x8f\xe4\
\xef\x5d\x3a\x87\xf2\x49\x88\xd1\xde\x0a\x19\xcd\x0b\x40\x44\xc2\
\xac\xfa\xad\x83\xbb\xea\x10\x13\x46\x16\x06\x69\x32\x37\x02\x8e\
\x83\x71\xa4\x2d\x0d\xd5\xbb\x6b\xc1\xe3\xad\xc8\x82\x31\x06\xc5\
\xef\xdc\x7d\x18\x65\x78\x62\x94\xdb\x76\xff\xc2\x11\xd0\xcf\x74\
\xa3\x0c\x0d\xa6\x74\x73\xc7\x2e\xdc\x65\x53\x9d\x3a\xff\xd2\x45\
\x7c\x77\x6e\x60\x18\x2e\x83\x8b\x7b\x18\xf2\x5f\xc3\x6f\xa8\x0b\
\x47\xc0\x68\x3b\xec\xd1\xcd\xba\x3d\x58\xdb\xdf\xf1\xd8\x56\x1c\
\xee\x82\x38\xe4\xdd\x2f\xa1\xd8\x2c\xa7\xd4\x59\xb9\x30\x04\x94\
\xe1\x21\xf4\x33\xdd\x29\xdd\x7d\x7a\x39\xf6\xa6\x2d\x98\xbb\xea\
\x3d\x7e\xcb\x8e\x9c\x41\xf7\x45\x08\x14\x04\x58\xae\xad\xc8\xdc\
\xa5\x27\x27\x60\x1c\x69\x03\xdb\x4e\xe9\xe6\x8e\x5d\x20\x04\x4e\
\x45\x25\xce\x0b\xeb\x53\x76\x3d\xfa\x90\x25\xa7\x2e\xe0\x53\x8d\
\x9c\xf2\xe6\x46\x40\xca\x44\xf5\xa7\x89\xb5\x73\x37\x00\xf7\xed\
\x28\xd1\x50\xad\xe7\x5d\x59\xcb\x0f\x14\xba\xb9\x9d\xe2\x39\x11\
\xd0\x2e\x9e\x47\xe9\xbf\x9d\xd2\x9d\xaa\x0d\x38\xcf\xaf\x01\x20\
\xc2\x08\x7f\xec\xde\x88\xd4\xa6\x8a\x6d\xc9\xa5\xab\xf8\x6f\xdf\
\x5a\x38\x02\xc6\x97\x19\xc5\xb7\xb3\x0e\x80\xd1\x89\x08\x31\xfd\
\x4f\x26\x83\x82\xe8\xeb\x1b\xbd\x31\xad\x2d\x39\x11\xd0\xb2\x39\
\x88\x58\x14\xfd\xf4\x09\x8f\x4d\x3f\x7d\x12\xed\xdc\x4f\xd8\x6e\
\x8c\x22\xc5\x42\x08\x41\xe0\x4e\xd4\x4b\xe0\x68\x3b\xf1\xfd\x07\
\x90\xbe\xd9\x3b\x6c\x56\x02\xc6\xb1\xa3\x88\x78\xdc\x1b\x94\x6c\
\xc5\xc5\xb3\x11\x8f\x46\xd0\x4f\x7d\x93\x28\xd6\x59\x24\xeb\x16\
\x64\xfe\xfb\x73\x11\xe3\x8b\x43\x59\x7d\x66\xfd\x02\xea\xd5\x5f\
\x50\xfb\x7a\x53\xba\x1b\xf0\x13\xdb\xf2\x22\x8a\x90\xb8\x52\xa0\
\x48\x1f\x3a\x79\xa8\xae\x8a\xaa\x0a\x70\x5d\xf4\xee\xef\xa6\x92\
\xff\x7c\x11\xf5\xe6\xaf\x38\xab\xd7\x3e\x19\x81\xcc\xd5\x87\xdf\
\x7e\x8d\xfe\x83\x1f\xa0\xda\x79\xfc\x95\x67\xb2\xde\xd9\x84\x05\
\xa4\x8f\x23\x81\x57\x5f\x46\xbb\x7c\x69\x2a\x47\x6b\x0b\x13\x9f\
\x1c\x7c\x2c\xc6\x63\xb7\x40\x8c\x8f\x63\x1c\xff\xda\x63\x1b\xad\
\x79\x03\x55\x80\xa9\x2a\x2c\x76\x97\xce\x18\x67\xd6\xbd\xeb\x5d\
\xc4\xd1\xf6\x69\x35\x94\x49\x60\x38\x12\x0f\xcb\x49\xdb\xeb\xa4\
\x7f\xdb\x89\x78\xf8\x20\xa5\x8f\xaf\x7c\x86\xe1\xcd\xcf\xf1\x48\
\x71\xb1\xb4\x71\xf2\x1f\x3d\x35\x63\x42\xab\x66\x3b\xd2\xef\x9f\
\x5a\x48\x2c\x8a\x7e\xb2\xc3\xe3\x33\x69\xc7\x89\xc4\xc3\x12\x18\
\x56\x80\x1e\xdb\xb1\x45\xdf\x68\xaf\xc7\x29\xf3\xf3\xc7\x6a\x42\
\x48\xab\x00\x1b\x95\x22\xab\x9c\x32\x7f\xe1\x8c\x04\x64\x60\x11\
\xd6\x5b\xde\xce\x98\xd9\x13\xfa\x46\x7b\x49\xde\x15\x7a\x34\x12\
\x37\x96\x3d\x67\x07\xba\xd9\x50\x56\x9d\xcc\x22\x89\xef\xfb\x08\
\x48\x1b\x3c\xd6\xac\x65\x65\x38\x88\x13\x88\x53\x92\x65\xe6\x8a\
\x37\x1f\xc8\x38\xa4\x44\x62\x88\x49\x9e\x4c\x67\x07\x52\x87\x5a\
\x4f\xce\x63\x79\x74\xdc\x62\x69\x81\x3e\x2b\x70\x2e\x32\x6d\x2c\
\x97\x4d\x72\x44\x0a\xb9\xd7\x74\x4c\x51\xdf\x55\xcb\xcd\xb1\xeb\
\x33\x06\x2e\x14\x78\x7d\x57\x2d\xa6\x63\x0a\x29\xe4\x5e\xd9\x24\
\x47\xfe\x1f\x57\xb3\x7f\xe4\x3f\xbd\x9c\xa6\x0c\xff\xf2\xf5\xfc\
\x6f\x06\xc8\xf8\x14\x54\xa0\x7f\x77\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x04\xe2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x04\x5f\x49\x44\
\x41\x54\x58\x85\xcd\x97\x7d\x4c\x55\x65\x1c\xc7\x3f\xcf\x39\xe7\
\xca\x45\x79\x49\xb8\x80\x31\xd1\xe5\x46\xe2\x80\x48\x46\x71\x2d\
\xd0\x5a\xba\xf9\x52\x0e\x97\xd0\x5a\xbe\x94\x0b\x6b\x6c\x16\x35\
\x4a\xaa\x85\xc9\xcc\xe1\x1f\x15\x1b\xd3\x5e\xb6\x9c\xa2\xfe\x03\
\xa5\xa4\xc6\x4c\x06\xe4\x7c\x29\xb0\xb1\x39\x76\x53\xb1\xb9\xe9\
\x92\x81\x5e\x40\x07\xc8\xbd\xdc\x73\xcf\xd3\x1f\x70\x19\x74\x2f\
\xdc\x17\xca\xfa\x6e\xcf\x76\xce\xef\xfc\x9e\xe7\xfb\x39\xe7\xf9\
\xed\x39\xfb\x09\x29\x25\xe3\x25\x76\x8a\x78\xa0\x00\xc8\x1c\x1d\
\x73\x98\x9e\xba\x80\xb6\xd1\x51\x23\x77\xc8\xdb\x13\xfc\xc6\x03\
\x88\x72\x91\x2f\xa4\xd8\x27\x91\x16\x00\x4d\xd5\x64\x8c\x39\x56\
\x4c\xc7\xbd\xd7\xd1\x23\x75\xb7\x2e\x00\x04\xc2\x2e\x85\x2c\x92\
\x65\xb2\xd6\x0b\x40\xec\x14\x5f\x03\x5b\x67\xa8\x33\x64\x71\x76\
\xa9\x58\xb1\x60\x35\xe9\x71\x19\x84\x69\xe6\xe9\xf8\xe3\xd4\x1d\
\xb4\xdf\xb9\x44\xc3\xf5\x7a\x2a\x5b\x2a\xe4\xb0\x7b\x58\x00\xdf\
\xc8\x1d\xf2\x8d\x31\x00\x51\x2e\xf2\x91\xd4\x3c\x1a\xbb\x88\x83\
\x6b\x6b\x49\xb1\xa4\x4e\xcb\x74\x32\x5d\xb1\xdb\xd8\x7c\x3c\x9f\
\x8e\x9e\xcb\x20\x28\x90\x65\xb2\x56\xf0\x09\xf1\x02\x61\x33\xa9\
\xa6\xd8\x33\x9b\xda\x44\xb0\xe6\x86\x01\x8a\x12\x1c\xc4\xb2\xea\
\x4c\xe9\x72\xbb\x7a\x24\x32\x55\x01\x0a\x24\xd2\x52\x9c\x5d\x1a\
\xb4\x39\x8c\x98\x77\x77\xbb\x02\xce\x4f\xb1\xa4\x52\x9c\x5d\x2a\
\x46\xeb\xac\x40\x61\xa4\xd2\x59\xb1\x60\x75\xd0\xe6\x1e\x45\x58\
\x74\x5a\x5d\x17\xe9\x33\xee\x06\x94\x3f\xce\x2b\x53\x01\x32\x35\
\x55\x93\xe9\x71\x19\x21\x03\xcc\x52\xc3\xb1\x18\x89\x0c\x39\xc3\
\x18\x1a\xf2\x9f\x9f\x1e\x97\x81\xa6\x6a\x12\xc8\xd4\x80\x39\x31\
\xe6\x58\x11\x6a\xb5\xff\x78\xad\x6e\xec\xfa\xda\x55\x37\x71\x73\
\x0d\x34\xc3\x44\x44\xa4\xef\xfc\x35\xc9\x79\x84\x69\x66\x62\xcc\
\xb1\xe2\xf6\x60\xf7\x1c\x2d\x24\xd7\x71\xda\x50\xb7\x6e\x62\xe0\
\xf2\xd4\xf9\x7d\xef\x4d\x3c\xf8\x82\xa8\xdf\x7f\x47\x01\x7d\x81\
\xa6\xc6\xd3\x84\x9b\xc3\x59\xf2\x74\xae\xd7\xb3\xc3\x79\xc7\xc6\
\xae\xfb\xe4\x3d\xec\xea\x9f\x98\x86\xc3\xd0\x4d\xf7\x51\x84\x60\
\x36\x09\xcc\x36\xe2\x43\x07\x70\x0c\x0d\x51\xf2\xf6\x26\x06\x06\
\x07\xa8\x3e\x72\x0a\xeb\x53\x39\x13\x9e\xaf\x49\xce\x03\x60\xd0\
\x70\x70\x53\xeb\xc0\x49\x12\xca\xdd\x28\x20\x8c\xd9\x33\xa3\x98\
\xab\xce\x47\x4c\x71\x98\xfb\xdd\x82\x3d\xbb\x3f\xc2\xba\xa4\x9f\
\xa3\xdf\x0f\xf2\xea\xc6\x55\xfc\x7a\xe1\x9c\xcf\x3c\xb7\xea\x44\
\xb8\x05\xa0\xa0\x46\xf7\x13\x31\x33\x82\x24\x6d\x6a\x73\xbf\x00\
\xbf\xdb\xda\x39\x7c\xe8\x2b\xbe\xf8\xfc\x3e\x39\x39\xf0\x5d\xed\
\xc0\xa4\x10\x51\x32\x9a\x14\x91\x8e\xe6\x8a\xc7\xa5\xa8\x98\xd5\
\x19\xfe\xde\x6d\x6a\x00\x29\x25\xef\x6c\xdb\xc0\xa7\xbb\x9c\x24\
\x24\x8c\xc4\x7c\x41\xdc\xd3\xfb\xb0\x0f\xf5\xd1\xd7\x6b\x8c\xcc\
\x33\x9c\x18\x8e\x48\x62\x8c\xc0\xfe\xe2\x93\xd6\xc0\x81\x6f\xbf\
\x44\x55\xaf\x53\x58\x68\x4c\x88\x7b\x20\xd6\xe7\xaf\xe2\xad\xe2\
\x72\x2c\x69\xb1\x24\xa5\x25\x22\xb4\x68\x12\x6f\xa4\xe1\xb4\x68\
\x2c\x8c\x48\xc2\x2c\xd5\xd0\x01\x6e\x77\x77\xb1\x7b\xd7\x76\x9a\
\x1a\x07\x7c\xee\x61\x4e\x0e\xd4\x1d\x1b\x60\xff\xfe\x0f\x39\x7e\
\x42\xe5\xb2\x6d\x18\x45\x51\x49\x59\x98\x4c\xca\xa2\xc5\xbc\xf4\
\x72\xa1\x57\xb1\x06\x05\xf0\xc1\xfb\x5b\xd9\xf2\xda\x30\xe9\xe9\
\x93\x4f\xb4\x5a\xc1\x6a\x75\x8c\xdd\x77\x76\xba\xb0\xd9\xda\xa9\
\xaa\x6a\xe7\xe7\xe6\x47\x42\x07\x68\x6c\x38\xc5\xd9\x33\xa7\x39\
\x74\x70\x38\xa0\x05\x3c\x4a\x4c\x1c\x19\xe7\xcf\x83\x23\xf0\x9f\
\xa3\x77\x11\x86\x99\xcd\xcc\x4d\x9a\xcf\xd2\x65\x91\x9c\x3c\x19\
\x14\x43\x48\xf2\x02\xc8\xc9\x7d\x86\xa6\xb3\x57\x29\xd9\x7e\x84\
\x8f\xcb\x92\xc9\x7a\x22\x92\xd6\xd6\x07\x08\xe0\xd1\xca\xd5\x2f\
\xd0\x7c\xae\x83\xc7\x1e\x7f\x85\xfa\xfa\xc0\x16\x73\xb9\xe0\xe6\
\xcd\x7f\x08\xc0\xa3\xf8\xf8\x04\xbf\x8b\xdc\xba\x05\x65\x65\x1a\
\x49\xf3\xc2\xe9\xb8\xb6\x98\xe5\x2b\x56\x06\x05\xd0\xd5\xeb\xe8\
\x91\x4e\xdd\xe1\x33\xc1\x6d\x48\x9f\x71\x80\xe6\x66\x78\x71\xfd\
\x2c\x52\xd3\xc2\xe9\xec\xda\xc8\xb1\x13\xad\x9c\xfc\xa9\x8d\xac\
\x27\xad\x53\x9a\x3a\x75\x07\xbd\x8e\x1e\x09\x74\x69\x40\x9b\xee\
\xd6\x33\xda\xef\x5c\x22\xeb\xe1\x6c\xaf\x64\x55\x99\x78\x10\xf4\
\xf7\x43\x75\xb5\x60\xef\xbe\x59\x48\x19\xc3\x96\xd7\x4b\xa8\xdc\
\xbb\x99\xc8\xc8\x28\x7f\x2f\x3b\xa6\xf6\x3b\x97\x18\xed\x15\xda\
\x14\x46\x3a\x16\x1a\xae\x4f\xbd\xd1\x36\x1b\x14\x15\x99\x99\x37\
\xcf\x4c\x43\xe3\x4a\xf6\x7c\x76\x9c\x5f\x7e\xbb\x41\xe1\x9b\xdb\
\x82\x32\xff\x9b\x57\x9b\x02\xd4\x08\x84\xbd\xb2\xa5\x42\x5e\xb1\
\xdb\x7c\x4e\xa8\xaa\xd2\x78\x6e\x79\x34\x51\x0f\xbd\xcb\x85\x8b\
\x7f\x70\xe0\x70\x3d\xb9\x4b\x9f\x0d\xca\xd4\xa3\x2b\x76\x1b\x95\
\x2d\x15\x52\x20\xec\x40\x8d\xdf\xc6\xe4\xc4\x0f\x47\xd1\x75\x9d\
\xe7\xd7\xae\xc3\x64\x32\x85\x64\x3a\xde\xdc\xab\x31\xf9\x5f\xb4\
\x66\x1e\xfd\xa7\xcd\xe9\x58\xe0\x01\xb7\xe7\x7f\x01\x38\x4f\xdd\
\x4b\xe4\x65\x71\x13\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x05\xf4\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x05\x71\x49\x44\
\x41\x54\x58\x85\xcd\x97\x7d\x4c\x55\x65\x1c\xc7\x3f\xcf\x39\xe7\
\x1e\x2e\xc8\x8b\x70\x2f\x72\x11\xf1\x0a\xf9\x96\x48\x2e\x64\xb1\
\x1c\x8e\xac\xb1\x8c\xa6\xa5\x9b\xc4\x5a\x69\xba\xb2\xe5\x5c\x92\
\xb5\x24\xb7\x34\x75\xb6\x32\xad\xa8\x29\xe9\x7c\x99\x2f\xb3\x4d\
\x4d\x97\x6f\xa9\xcc\xb7\xb2\x52\x54\x16\xe1\x0b\x32\x05\x4d\x10\
\x08\x2e\xc2\x15\x11\xb8\xe7\xde\xa7\x3f\x90\x3b\x6e\xf2\x22\xe2\
\xb2\xef\xf6\x6c\xe7\xfc\x7e\xbf\xe7\xf9\x7e\xcf\x73\x7e\xcf\x6f\
\xcf\x4f\x48\x29\x69\x0f\xb1\x48\xf4\x03\xd2\x81\x84\xbb\xc3\x46\
\xef\x50\x09\xe4\xdf\x1d\xdb\xe4\x42\xf9\xb7\x0f\x5f\x7b\x01\x62\
\xb1\x98\x22\xa4\x58\x25\x91\x56\x00\x4d\xd5\x64\x98\xd9\x22\x7a\
\xc3\x5e\xdb\xe4\x90\x86\xdb\x10\x00\x02\x51\x23\x85\x9c\x25\x17\
\xc8\xed\xf7\x08\x10\x8b\xc4\x6a\x60\xa6\xae\xea\x32\x33\x29\x4b\
\xa4\xc6\xa6\x11\x1f\x3e\x0a\x3f\xcd\xdc\x1b\x7e\x9a\x8d\x26\x0a\
\xab\x0b\xc8\x2d\xd9\xcf\xd7\xa7\x3e\x93\x2d\xee\x16\x01\xac\x91\
\x0b\xe5\xdb\x5e\x01\x62\xb1\x98\x82\x64\xdb\x50\xcb\xe3\x6c\x9c\
\xb8\x9d\xe1\xd6\xb8\x5e\x91\x76\x86\xa2\x9a\xf3\x4c\xdb\x3d\x85\
\x62\xc7\x45\x10\xa4\xcb\x05\x72\xbb\xe0\x13\xfa\x09\xc4\x79\x93\
\x6a\xb2\x1c\x9f\x9a\x2f\x7a\x4a\xee\xf1\x80\xa2\x74\xec\xbb\x78\
\xe3\x24\x3f\x15\xae\xa3\xac\xb6\x18\x55\x35\x91\x3c\x64\x12\xc3\
\xa2\x9f\x21\x65\x53\x82\x74\xb9\x5d\x0e\x89\x8c\xd3\x80\x74\x89\
\xb4\x66\x26\x65\x3d\xd0\x97\x2b\x0a\x54\x55\xb9\x88\x88\x30\x79\
\x6d\x2e\x77\x33\xd9\xb9\xb3\x38\x50\xb8\xde\x6b\x0b\x0f\x8a\x26\
\x29\x36\x0d\x5b\x48\x0c\x99\x49\x59\x62\xd9\x6f\x8b\xad\x40\xba\
\x42\x6b\xa6\x93\x1a\x9b\xd6\x63\xf2\x36\x04\x5a\x0d\xf2\x5c\xa7\
\xb9\xe9\xa9\xc3\xd1\x70\x83\xb9\xdf\xa7\xf8\x90\x5b\x03\xa3\x58\
\x91\x71\x14\x5b\x48\x0c\xff\xe2\x4a\xd0\x80\x04\x4d\xd5\x64\x7c\
\xf8\xa8\x07\xce\xf6\x3e\xaa\x3f\x56\xa3\x3f\x85\x65\x05\x7c\xbb\
\x3f\x83\x9b\x8d\x95\x5e\x5f\x58\x9f\x48\x96\x67\x1c\xa1\x7f\xdf\
\xc7\xbc\xb6\xf8\xf0\x51\x68\xaa\x26\x0d\xb7\x91\xa0\x00\xb6\x30\
\xb3\x45\xf4\x36\xdb\x71\x3a\xf8\x7c\xd7\x44\x1f\xf2\xd0\x3e\x11\
\xac\xc8\x38\xc2\x80\xd0\xa1\x3e\xa1\x7e\x9a\x99\xbb\xc7\xdb\xd6\
\x49\xfa\xf4\x0c\xd5\xb7\xca\x98\xbf\x23\x8d\x66\xb7\xd3\x6b\x0b\
\xf1\xb7\xf2\x45\xfa\x61\xa2\xc3\x86\x77\x39\xb7\x47\x02\x2a\xea\
\x4a\xf8\xf2\xe0\x5b\x4c\x5b\x3b\x84\x9d\x67\xb3\x01\x68\x6c\x71\
\x32\xff\x87\x34\x6a\x1a\xca\xbd\x71\xfe\x7e\x41\xbc\xff\xe2\x0e\
\xcc\x7d\xfa\x71\xd9\xb8\xd2\xe5\x9a\x5a\x4f\x04\x64\xed\x78\x9e\
\xf2\x9b\x97\x01\x58\x7d\xec\x03\x12\xec\xcf\xb1\xf2\x48\x26\xa5\
\xd5\x85\xde\x18\x5d\xf7\x67\xe6\xcb\xd9\x98\xec\x06\x15\xe2\x1c\
\x36\x62\xc0\xf3\x90\x04\x68\x8a\xee\x7d\x76\x7b\x0c\xde\xd9\x94\
\x88\xcb\xdd\xec\xb5\xa9\xaa\x89\x19\x93\x97\x60\x0f\x89\x87\x7a\
\x3f\x42\x03\x82\xb1\xa9\x76\xe8\x22\xbd\x3b\xfc\x05\x47\x72\xe1\
\x96\xf3\x5e\x7b\x5c\xd4\x18\x9f\xf7\xf6\xe4\x02\xc1\xeb\xe3\x97\
\x30\xd8\x3e\x1a\x35\xe4\x16\x81\x01\x81\x44\x6b\x76\x44\x37\x67\
\xeb\x1e\x01\x45\x17\x61\xc3\x5a\xf8\x6a\x19\x1c\x3d\xec\xeb\x4b\
\x8d\x9b\xda\xe9\x42\x33\xc6\x2e\xe5\xb5\x11\xf3\xd0\x5c\xfd\x70\
\x29\x2a\x66\x55\xef\x34\xb6\x3d\x7c\x7e\x81\xd3\x09\x93\x27\x42\
\x65\x05\x0c\x1b\x06\xaa\x0a\x79\xbf\xc3\x7b\x1f\x82\xae\x43\xfc\
\x80\xb1\x8c\x1c\x90\xcc\xb9\xb2\x13\x3e\x8b\x24\xc6\xa4\x31\x7e\
\xe8\x3c\x00\xa4\xa7\x19\x4f\x53\x10\x61\x8a\x0d\xd4\xee\x05\x78\
\x77\xc0\xe3\x81\xf4\x49\x8d\xd4\x3a\x40\x4a\x28\x2a\x82\x9c\x1c\
\x28\xbb\x0e\x4b\x17\x42\xfe\x99\xd6\xb8\xcc\xd4\x1c\xfc\xf5\x40\
\xef\x02\xf1\x31\x29\x64\x4c\x9c\xc7\x25\x71\x96\xbf\xae\xdd\xa1\
\xb9\x45\x63\x88\x1e\x83\x59\xbd\x0f\xf6\xf6\x3b\xb0\x6f\x8f\x93\
\xbc\x53\xc7\x80\x38\xa0\xb5\x6a\xdd\xbe\x0d\x5b\xb6\x40\x62\x22\
\x98\xcd\xf0\xeb\x09\x98\x3d\x67\x24\x3f\xbe\x5b\x47\x45\x5d\x29\
\x6e\x45\xc7\xd1\xb7\x18\x3f\x5c\x68\xfe\x6e\x22\x03\xcd\x0c\x34\
\xc5\x83\xec\x98\xac\xcb\x1d\x98\xf0\x52\x30\xbb\xf6\x3c\x8b\xd9\
\x7c\x0d\x55\xfd\x05\x68\xf2\x06\x9d\x39\x03\x6b\xd7\xc2\xf5\xab\
\xb0\xe0\x23\x28\xbd\xac\x12\x15\x3a\x98\x9a\x80\x2a\xcc\x12\x14\
\x45\xc1\xde\x32\x0a\x93\xa9\xe7\xd5\xdc\x27\x09\x53\xc6\x05\x72\
\xf5\xc6\x38\x9e\x1e\x63\x41\x88\x5c\xa0\xcc\xeb\xab\xad\x85\x75\
\xeb\xa0\xf0\x4f\xd8\xb6\x15\xbe\x59\x5f\x43\x90\x5f\x1d\x1e\x45\
\xc1\xd4\x12\x4b\x90\x1a\xd6\x63\xf2\x7b\x04\x00\xe8\xba\x60\xcf\
\x81\x11\xac\x5a\x93\x8c\xa6\x9d\x43\x55\xf3\x00\x17\xd0\x9a\x27\
\xc7\x8f\xc3\xe6\xcd\x50\x5d\x6c\x65\xc3\xbc\x14\xca\x2b\x83\xb1\
\xde\x89\x7a\x20\xf2\x0e\x05\xb4\x21\xe3\xd5\x50\x2e\x95\xa4\x32\
\x78\x88\x09\x21\x0e\x01\xd5\x5e\x5f\x45\x05\x7c\x97\x03\xd7\xae\
\xe8\x9c\x5e\x99\xc8\xc1\xbd\xf7\x97\x70\x3d\x12\x00\x10\x66\x51\
\x39\x79\xf6\x49\xe6\x7f\x3c\x1a\xa1\x9c\x42\x51\xff\xa0\xad\xae\
\x1a\x06\xec\xdf\x07\x7b\xf7\x42\x44\xb8\x0b\x5d\x31\x1e\xbe\x80\
\x36\xbc\x32\x3d\x98\xad\x27\xa3\x09\xb5\x3a\x51\x94\x5c\xa0\x0e\
\x00\x93\x09\x9e\x4a\x72\x31\x36\xb9\x11\x7f\x3f\x15\x55\x74\x51\
\xf4\xbb\x10\x50\x59\xdb\xe4\x90\xcd\x46\x53\x87\x01\x2d\x2e\x0f\
\xd5\x4a\x15\xd6\xfe\x1a\xdb\xae\x49\x5e\x78\x23\x18\x21\x7e\x46\
\x88\x4b\xc4\xc4\xba\x59\xb1\xfc\x16\x1e\x8f\x9b\xc6\xc6\x46\xa4\
\xeb\x36\x6e\xb7\xbb\x5b\xd2\x66\xa3\x89\xda\x26\x87\x04\x2a\x15\
\x20\xdf\x70\x1b\xa2\xb0\xba\xa0\xc3\x60\xdd\xa4\x10\x19\x1c\x4c\
\x5f\xcd\x82\x74\x05\x30\x67\x95\xc1\xc6\x43\x4f\x10\x1c\x52\x49\
\x76\xf6\x49\x4a\x4a\x2e\x53\x5f\x5f\x4f\x69\x69\x29\x76\xfb\x40\
\xe6\xce\x7e\xb3\x5b\x01\x85\xd5\x05\xdc\xed\x15\xf2\x15\x5a\x3b\
\x16\x72\x4b\xf6\x77\x3a\x21\x52\xb7\x60\xf3\xb7\x30\xc8\x91\x88\
\xbd\x21\x91\x09\x63\x06\x71\xb5\x3c\x85\x81\x83\x22\x71\x3a\x1b\
\x38\x95\x97\xc7\xf4\xe9\xd3\x71\x3a\x9d\x6c\xd9\xb2\x89\xd2\x2b\
\xc5\x5d\x0a\x68\xc7\x95\x7f\xdf\xd7\xf2\x9b\x8d\x2e\x42\x03\x4c\
\x1d\xfa\x00\x2e\x9c\x2b\x60\xdf\xee\x9d\x14\x15\x5d\xe0\xd3\x65\
\x5f\x13\x61\xeb\xf8\x68\x16\xd5\x9c\xf7\xb9\x96\x3f\xfa\xc6\xe4\
\x7f\xd1\x9a\xb5\xe1\x91\x36\xa7\x5e\xc3\x7f\xdc\x9e\xff\x03\xe5\
\xc7\x46\xab\xfc\x64\xec\x71\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x06\xba\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x06\x37\x49\x44\
\x41\x54\x58\x85\xc5\x97\x6d\x50\x54\xd7\x19\xc7\x7f\xe7\xee\xdd\
\x17\xde\x16\x61\x61\x41\x1b\xc5\x9a\x18\x93\x12\x54\x90\x68\x30\
\x5a\x93\xb6\x98\x48\x12\x27\xd6\x6a\x9b\x8e\xb5\xd0\xd0\x74\xca\
\x58\x6b\x6d\x27\x6d\xd2\xd4\x68\x3e\x74\x1c\x93\x36\x66\x9a\x98\
\x68\xab\x4e\x94\x66\x46\x4c\x68\x8d\x40\xa3\xd4\x5a\x32\x9a\xf0\
\xa2\x18\x06\xa3\x48\x14\x5f\xa2\xf2\xe6\x82\x2c\x2c\xbb\xec\xde\
\xbb\xa7\x1f\x2e\xae\x50\x40\xb6\x92\x69\x9e\x99\xf3\xe1\x9e\x97\
\xe7\xff\x3b\x7b\x9e\xf3\xec\x79\x84\x94\x92\x81\x26\x36\x08\x27\
\xb0\x1c\xc8\xe8\x6f\xc9\x8c\xcd\x5a\x80\xda\xfe\x56\x24\x5f\x94\
\x6d\x83\xf4\x06\x02\x88\x97\xc4\x32\x21\xc5\x16\x89\x4c\x00\x50\
\x4d\xaa\x8c\xb7\x39\xc4\x58\xd4\x3b\x7c\x2e\xa9\xe9\x9a\x00\x10\
\x88\x6b\x52\xc8\x02\xb9\x4e\xee\x1d\x02\x20\x36\x88\xad\xc0\x33\
\x16\x93\x45\xae\x99\xf3\x1b\x91\x3d\x25\x87\xb4\xc4\x19\x58\x55\
\x5b\x78\x4a\x52\x82\x18\xca\xda\xa7\xf9\xa8\x6f\xaf\xa3\xbc\xa9\
\x8c\xcd\x55\x1b\xa5\x5f\xf7\x0b\x60\x9b\x7c\x51\xfe\x24\x04\x20\
\x5e\x12\xcb\x90\x14\xdd\xed\xb8\x97\xb7\x17\xef\xe5\x9e\x84\xd4\
\xb0\x34\xcd\x07\xca\x50\xcb\xf6\xa3\x9e\x38\x8e\x72\xea\x24\xf2\
\x8e\x89\x68\xe9\xb3\xd0\xb3\xe6\xd1\x97\x9b\x0f\x16\xcb\xa0\xf9\
\x0d\xd7\x3e\xe5\x87\xef\x2f\xa3\xd1\x75\x1a\x04\xcb\xe5\x3a\xb9\
\x57\xb0\x1e\xa7\x40\x7c\x6a\x36\x99\x1d\x15\x2b\x6b\x45\x38\xe2\
\xc2\x75\x8d\xc8\xb5\xab\x30\xbf\xbb\x67\xc4\x39\xfa\x7d\xd3\xe9\
\xdd\xba\x13\x7d\x66\xc6\x10\x88\x05\xbb\x32\x64\x40\x0f\xb8\x24\
\x32\xd5\xc4\x43\xe4\x01\xdf\xf9\x65\xd6\x6f\xc5\x92\x7b\x96\x8f\
\x2e\xde\xe1\x22\x26\x2b\x1d\xb5\xea\x63\x00\x64\x74\x34\xde\x8c\
\x2c\x82\x39\x8f\x21\x9d\x49\x28\xee\x2e\x84\xc7\x83\xd2\xd6\x8a\
\x75\xd7\x0e\xf4\x59\xf7\x13\xbc\x73\x6a\x68\x7d\x42\xa4\x13\x2d\
\xa8\x89\xa3\x9f\x57\x44\x02\x97\x14\x8c\x48\x27\x7b\x4a\xce\xa8\
\xe2\x00\x91\x6b\x0a\x50\xae\x5c\x0e\x7d\x6b\x0b\x17\xd1\xf9\x8f\
\x12\x2a\x36\xe5\x72\xb9\x68\x37\x5d\x9f\x5d\xc6\xf7\xdc\x3a\x30\
\x99\x40\xd3\x88\x28\xc8\x47\x74\x5d\x1f\xe4\x63\x80\x56\x86\x02\
\x64\xa8\x26\x55\xa6\x25\xce\x18\x55\xdc\xbc\xaf\x18\xf3\x7b\x45\
\xc6\xce\x63\x63\x8d\xbe\xe2\xbd\x38\x7f\x9c\x4f\xa2\xdf\x89\xb7\
\xcf\x8a\x37\xa0\xe2\x7b\x61\x03\xbd\xaf\x6f\x03\x40\x69\xbe\x4a\
\xc4\x73\xbf\x1a\xe4\x27\x2d\x71\x06\xaa\x49\x95\x37\x00\x92\xe3\
\x6d\x0e\x11\x4e\xb4\x9b\xf7\x15\xf7\x8b\x8f\xa3\xfb\xc8\x31\xf4\
\xcc\xd9\x46\xff\xbb\x7b\xb8\xef\xe9\x35\x5c\x3d\x26\x71\xf7\xf5\
\xd1\xe9\x02\xff\xca\x1f\xa1\x7d\x23\xdb\x18\xdf\xff\xf7\x41\x7e\
\xac\xaa\x8d\xfe\xeb\x9d\xac\x8c\xaa\x3a\xc0\x4c\x27\x8e\x03\xa0\
\x65\x3d\x48\x70\xca\x5d\xf4\x94\x94\xa3\x65\x3d\x18\x12\x99\xb3\
\x71\x09\xcd\xda\x05\xbc\x16\x17\x00\x81\xc7\x16\x03\x46\xdc\x28\
\x97\x2e\x0e\xeb\x33\x6c\x00\xe1\xf3\x61\x3a\xdb\x08\x18\x11\x0e\
\x20\x63\xec\x78\xf6\x1d\x40\x9b\xff\x10\x00\x31\xff\x3e\xc8\x8c\
\x95\x3f\xa0\xd5\xdb\xc0\x95\xee\x4e\xda\xef\x9e\x74\x13\xbe\xbe\
\x6e\x6c\x00\xd2\x66\x43\xc6\xc5\x1b\x8b\x06\x04\xa1\x8c\x8a\xc2\
\xf3\xb7\x32\xb4\x6f\x2e\x04\x20\xae\xa2\x86\x8c\xa7\x56\xd1\x26\
\x8e\xd0\x7d\xfd\x58\x68\x5e\x70\xc2\x57\xc6\x06\x00\xa0\xa7\xcf\
\x02\x40\xad\xa9\x34\x32\xdf\x0d\x88\x88\x08\xda\xf6\x14\xd1\x99\
\xf3\x75\x00\x62\x2b\x3f\x21\xf3\xd1\xe7\x49\xfe\xb0\xff\x67\x37\
\x9b\xd1\x53\xd3\xc2\x07\xf8\x57\x39\x74\xbb\x87\xf6\x6b\xb3\x1f\
\x30\x16\x9d\xfd\x0c\xeb\x9f\xdf\x1c\x3c\x16\x09\x8d\x7f\xdd\x4c\
\xfb\x93\x06\x44\xcc\x89\x93\x38\xb6\x17\x1a\xe0\xd3\x67\x0e\xc9\
\x8a\x23\x02\x34\x9c\x86\x9d\x7f\x81\x57\x37\xc1\xe1\x43\x83\xc7\
\xfa\x56\xaf\x25\x38\xd1\x38\x57\xdb\x0b\xcf\x62\xfe\xa0\x34\x34\
\x66\x97\xb1\x4c\xb3\xa4\xd3\xb8\xe3\x75\x5c\x39\x73\x8d\xce\x60\
\x10\x14\x05\xef\xc6\x3f\x0e\x2b\x0e\xa0\x0e\xfc\x70\xbb\xe1\xdb\
\x8b\xa1\xa5\x19\xa6\x4d\x33\x72\x49\xf5\xc7\xf0\x8b\x67\x8d\x0d\
\xc8\x18\x3b\xbd\x6f\xed\x20\xfa\xf1\x6c\x84\xc7\x43\xd4\xd2\xc7\
\xe9\x5d\xf2\x24\xdd\xdf\x5a\x88\x36\x35\x8b\x71\x5d\x9f\x93\x72\
\xb4\x98\x71\x87\x6b\x6f\x42\xff\xf4\x67\x68\x73\xe7\x8d\x08\x00\
\xeb\x69\x71\xbe\x9c\x24\x5d\xdd\x52\xce\x79\xc0\x23\xad\x56\x29\
\x8d\x03\x96\x32\x2a\x4a\xca\x15\x2b\xa4\x5c\xbd\x5a\xca\x43\x15\
\x52\x76\x7a\x8c\xe6\xd9\xba\x53\x06\x63\xec\x37\x27\x0e\xd7\x84\
\x90\xbe\x67\x0a\xe4\x75\x97\x37\xb4\x6e\x60\x73\xbe\x9c\x24\x59\
\x4f\x4b\xe8\x08\x4a\xf7\xbb\xa9\xae\xfa\x27\x7e\xff\xb9\x10\x9b\
\xc7\x03\x85\x85\xf0\xd1\x47\x70\xa0\x14\xfe\xb4\xd9\xf0\xee\x5f\
\x91\x4b\x77\x4d\x3d\x9e\x47\x1e\x25\x68\x51\x07\x6f\x48\x08\xf4\
\xd4\x34\x7a\xca\x0e\xe1\x7d\xf5\x0d\xa4\xed\xd6\x09\x4e\xb0\x9e\
\x16\x67\x54\x52\xd2\x99\x82\x16\x2a\x0e\xf7\xf0\xd4\xb2\x6a\xfc\
\x7e\x33\xba\x7e\x3f\x70\x73\x71\x7c\x3c\x2c\x5d\x0a\x56\x1b\xe4\
\x3e\x0d\x77\x4e\x85\xda\x40\x0d\x91\xd2\x45\x74\xc3\x25\x26\xd4\
\x69\x58\x52\xee\x45\x9f\x99\x81\xb4\xc7\xde\x52\x14\x60\xda\x96\
\x64\xda\x3c\xad\xad\x83\xf0\x17\x3c\x1c\xcd\x85\xab\x0f\xb3\x74\
\xf1\x69\x8e\x1e\x29\x47\xca\x74\xe0\x0e\x00\x3a\x3a\x60\xfb\x76\
\x98\x3f\x1f\x8a\xde\x81\xa8\x89\xd7\x58\x94\x7f\x1d\x3f\x16\xfc\
\xa9\x8f\xa0\x4c\x4f\x41\x1b\x55\x76\xa8\x0d\xb9\x05\x16\x8b\x60\
\xff\x07\x5f\x63\xcb\xb6\x79\xa8\xea\x49\x4c\xa6\x6a\x20\x00\x18\
\x41\x5d\x51\x01\xbb\x77\x43\x7b\x63\x02\x3b\x7f\xbd\x80\x2b\x2d\
\x76\x12\xbc\xc3\x27\x99\xdb\x02\xb8\x61\xdf\xfb\x7e\x1c\x67\x9a\
\xb2\xb9\x6b\xaa\x19\x21\x0e\x02\xed\xa1\xb1\xe6\x66\x78\xeb\x4d\
\xb8\x78\xce\x42\xcd\x1b\x99\x1c\x28\x31\x7d\xf1\x00\x00\xf1\x0e\
\x13\x95\xc7\xd3\x79\xfe\x77\xb3\x10\x4a\x15\x8a\xe9\x13\x20\x08\
\x80\xa6\x41\x59\x29\x94\x94\x40\x52\x62\x00\x8b\x72\x3b\x07\x10\
\x66\x2a\xfe\x6e\x9e\x9d\x77\x2a\x27\x12\x97\xe0\x46\x51\xca\x01\
\xe3\x81\x61\x36\xc3\xec\x39\x01\xe6\xcf\xeb\x25\xc2\x6a\xc2\x24\
\x82\xb7\x05\xd0\xd2\xe1\x73\xc9\x3e\xcd\x37\xec\x04\x7f\x20\x48\
\xbb\xd2\x4a\xc2\x04\x95\xa2\x8b\x92\x45\xb9\x76\x84\xf8\x10\x21\
\xce\xf0\xd5\x29\x3a\x7f\x78\xa5\x9b\x60\x50\xa7\xb7\xb7\x17\x19\
\xf0\xa0\xeb\xfa\xa8\xa2\x7d\x9a\x8f\x0e\x9f\x4b\x82\x91\x07\x6a\
\x35\x5d\x13\xf5\xed\xc3\xff\x5d\x5a\xcc\x0a\xe3\xed\x76\xc6\xa9\
\x0e\x64\x20\x92\x9f\x6f\xd1\x78\xfb\xe0\x74\xec\xb1\x2d\xbc\xf6\
\x5a\x25\x4d\x4d\x67\xe9\xea\xea\xe2\xfc\xf9\xf3\xa4\xa4\x4c\x62\
\xed\xaa\xfc\x51\x01\xea\xdb\xeb\xe8\xaf\x15\x6a\x15\x8c\x8a\x85\
\xf2\xa6\xb2\x11\x17\x8c\xb7\x38\x48\x8e\x70\x30\xd9\x95\x49\x4a\
\x4f\x26\x4f\xcc\x9d\xcc\x85\x2b\x0b\x98\x34\x79\x3c\x6e\x77\x0f\
\x55\xd5\xd5\xe4\xe5\xe5\xe1\x76\xbb\x29\x2c\xdc\xc5\xf9\x73\x8d\
\xb7\x04\x18\xa0\x55\x1b\xf6\xb3\xbc\xb3\x37\x40\x5c\xa4\x79\x44\
\xa7\xa7\x4e\xd6\x51\xfa\x7e\x31\x0d\x0d\xa7\xf8\xfd\xa6\xcd\x24\
\x25\x0f\x7f\x35\xff\xfb\x59\x3e\xa6\xc2\xe4\x7f\xb5\x61\x0b\x93\
\x2f\xac\x34\x1b\xc1\xc2\x2a\xcd\x6e\xd8\x97\x5a\x9c\x86\x3a\xfe\
\xcf\xe5\xf9\x7f\x00\x5c\x2a\xf6\x6f\xd3\xfe\x08\x52\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\x2b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x05\xa8\x49\x44\
\x41\x54\x58\x85\xcd\x97\x7b\x6c\x53\x75\x14\xc7\x3f\xbf\x7b\xdb\
\xae\x6c\xa3\x3a\xda\xb1\x0d\x23\xc3\xa1\x82\x8e\x31\x1c\x43\x60\
\x42\x60\x26\x44\x21\x82\x41\x65\x12\x34\xc8\x12\x9f\x04\x22\x8a\
\x89\x68\x22\x03\xa2\x06\x11\x14\x49\x04\x45\x90\xf0\xc8\x34\x60\
\xd4\xe0\x86\x81\x05\x27\x28\x8f\x0d\x98\x8e\xf1\x18\x8f\xbd\x9c\
\x63\x8f\xae\x1b\x94\x6d\xed\xfa\xb8\x3f\xff\x18\x5c\x3b\xd6\x76\
\xe8\xe2\xe3\x9b\xdc\xa4\x39\xcf\xef\xbd\xbf\xd3\x73\x7e\x47\x48\
\x29\x09\x84\x58\x2e\x06\x02\x59\x40\xda\xb5\x27\x9e\xbe\xa1\x01\
\x28\xb9\xf6\xec\x94\x39\xb2\xa9\x5b\xbe\x40\x02\x62\x85\x98\x25\
\xa4\x58\x2f\x91\x36\x00\x83\x6a\x90\x03\xcc\x56\xd1\x97\xec\x2d\
\x6e\x87\xf4\xf9\x7d\x02\x40\x20\x9a\xa5\x90\xf3\xe5\x52\xb9\xab\
\x07\x01\xb1\x5c\x7c\x0a\x3c\x6f\x52\x4d\x72\xd1\xd8\x25\x62\x4a\
\xd2\x34\x52\x62\x53\x89\x30\x98\xfb\x92\x9f\x4e\x9f\x9b\x32\x7b\
\x29\x05\x95\x7b\x58\x5b\xb4\x52\x7a\xfc\x1e\x01\x6c\x94\x39\xf2\
\x05\x9d\x80\x58\x21\x66\x21\xd9\x79\xb7\xf5\x1e\xb6\xce\xd8\xc5\
\x70\x5b\x32\xa2\xd9\x0e\x80\xb4\xc5\xf6\x89\x40\x20\xca\x9b\x4f\
\xf3\xcc\xee\x59\x9c\x77\x9c\x05\x41\x96\x5c\x2a\x77\x09\x96\x31\
\x50\x20\x4e\x1b\x55\xa3\xf5\xc0\xdc\x12\x31\xdc\x96\x4c\xbf\x35\
\xaf\x62\x74\x17\x02\xe0\x35\x67\xe2\x5a\xfc\x41\xc8\xa0\x9a\x06\
\x8a\xf2\xd7\x48\x4c\xda\x96\x26\xbd\x7e\xaf\x43\x22\x93\x15\x20\
\x4b\x22\x6d\x8b\xc6\x2e\x11\xd7\xdf\xdc\xe8\x2e\x44\x99\xe3\x41\
\x99\xe3\xc1\xe8\x2e\xd4\xbf\x46\x30\x28\x0a\x34\x36\x7a\x6f\x9a\
\xc0\x70\x5b\x32\x8b\xc6\x2e\x11\xd7\xea\x2c\x4b\xa1\xab\xd2\x99\
\x92\x34\x2d\xac\x63\xa7\xbf\x93\x5a\x67\x4d\x50\x5d\xb4\xcd\x47\
\xb1\xf7\x18\xad\xda\xe5\x9b\x22\x11\x90\x2b\x4d\x01\xd2\x0c\xaa\
\x41\xa6\xc4\xa6\x02\x5d\x67\xee\x35\x67\xa2\xe5\x9a\xd0\x72\x4d\
\x78\xcd\x99\x48\x5b\x2c\x2b\x0f\xe5\x90\xb1\x25\x85\xed\x65\x9b\
\x7b\x04\x8c\x52\xfb\x61\xd3\x06\xe1\xea\x8c\xc0\xe5\xea\x9d\x40\
\x4a\x6c\x2a\x06\xd5\x20\x81\x34\xc1\x32\x1a\x06\x46\xc5\xc5\x9d\
\x9b\xdf\xd0\xcd\x28\xb0\x08\x8b\xeb\x0e\x33\xf5\x8b\x89\x68\x52\
\x03\xe0\xa1\xa1\x8f\xb0\xee\xa1\xcf\x18\x18\xd5\xbd\x45\x14\x1f\
\xf1\x91\x78\x8f\x1f\x93\x3f\x82\x18\x6b\x78\x12\xc3\xd6\xc7\xd3\
\xd4\xde\xd8\x18\xb2\x7c\xa4\x2d\x56\xff\x07\x5c\x6c\x3d\x8f\x51\
\x35\xe9\xba\xbd\x15\x79\x8c\xdf\x32\x82\x6f\xcf\xed\xea\xe6\x13\
\x9f\xd4\x49\x9d\x5a\x8f\xcb\xe4\x08\x9f\x3d\x00\x37\x55\xbf\x73\
\x46\xcc\xe3\xc0\xdc\x13\x8c\x8a\x1b\xad\xcb\x5a\x5c\x0e\xb2\x77\
\x67\xf1\x5c\xde\x53\x5c\x76\xb7\x76\x91\x8e\xf0\x50\xee\x3c\xc8\
\x25\x51\x43\xdd\xd5\x56\x1a\x5c\x76\x2e\xfa\x2a\xfa\x4e\x00\x60\
\x98\xf5\x5e\x0a\x9e\x3e\xca\x92\x07\x96\x61\x50\x0c\xba\xfc\xab\
\xb3\xb9\x64\x6c\x49\x61\x7f\xd5\x5e\xde\x3b\xb6\x98\x0d\x07\x57\
\x63\x32\x69\x5c\xb2\x94\x50\x1f\x75\x8a\x28\x93\x1a\x36\xae\x21\
\xac\xf6\x46\x63\xc5\xc0\xeb\x19\x39\x3c\x3c\x74\x3a\x2f\xee\x99\
\x4b\x79\xf3\x69\x00\xea\xdb\xea\x78\xe2\xab\x87\x75\xbb\x93\x55\
\xbf\x30\x32\x61\x1c\x31\x91\x16\xe2\xd5\x44\x08\xd3\xcc\x83\x7e\
\x81\x1f\x0a\xe0\xaa\x33\xb4\x53\x6a\x5c\x1a\x3f\xce\x3d\xc1\xc2\
\x31\xaf\xa1\x88\x9e\x21\xb6\x9f\xdc\x48\x74\x64\x34\xb7\x1b\x12\
\x11\xbd\x4c\x92\x1e\xde\xe5\x67\x61\xcb\x26\xf8\x70\x15\x14\xee\
\x0f\xed\x18\xa1\x46\xb0\x62\xf2\xfb\xe4\xcf\x3e\x40\xb4\xa9\x7f\
\x37\x5d\x71\xf5\x71\x2e\x34\x9d\x09\x9f\x39\x18\x01\xa7\x13\x1e\
\x9b\x01\xf9\xf9\xf0\xfd\xf7\x70\xf8\x20\xbc\xf7\x36\x78\x3c\xa1\
\x03\xfc\xd6\x56\x45\x9b\xe7\x6a\x0f\xf9\xe6\x23\x1b\xf4\xdf\xa2\
\xd9\x1e\xb2\x9b\xea\x35\xa0\x69\x90\x35\xb3\x83\x16\x47\x24\x52\
\x42\x79\x39\xd4\xd6\xc2\xcc\x99\xf0\x4e\x0e\x3c\xfa\x38\xa4\xa5\
\xf7\x0c\x50\xdd\x5e\x41\xd6\xe8\xd9\xd8\x2f\xb7\xe2\x6a\xf3\xe2\
\xd0\xea\x71\x79\xda\x39\xfc\xfb\x01\x4e\x35\x95\x32\x66\xfb\xd6\
\xb0\x73\x45\x27\x90\xff\x9d\x93\xe2\xa2\x1f\x81\x64\x60\x28\x00\
\xed\xed\xb0\x63\x07\xa4\xa7\x83\xd9\x0c\x87\x7e\x86\x05\x2f\xa3\
\x9f\xab\xdd\xd5\xc2\x94\xf4\x09\x4c\x67\x22\x7e\x45\x61\x98\x7b\
\x32\x46\x63\xc0\xa1\x37\x37\xe9\x73\x05\xc0\x98\x5b\x88\xbb\xd9\
\xde\x6d\xc2\xea\x47\x30\xfd\x51\x0b\xdf\x7c\xf7\x20\x66\x73\x0d\
\xaa\xfa\x13\xe0\xd6\x8d\x8e\x1f\x87\x4d\x9b\xa0\xb6\x1a\x96\xbe\
\x01\x15\x17\xba\xe4\xb5\x86\x0a\xcc\x12\x14\x45\x21\xd1\x93\xda\
\x3d\x39\x20\xc2\x95\xff\x8d\x04\x00\x26\x65\x46\x53\x7d\x29\x93\
\xf1\x19\x56\x84\x28\x00\x7e\xd7\x75\x2d\x2d\xb0\x79\x33\x94\x9d\
\x84\x9d\xb9\xb0\xee\xf3\x66\xfa\x47\x5c\x46\x53\x14\x8c\x9e\x24\
\xfa\xab\x03\x7a\x04\x0f\x35\x57\xba\x93\x0c\x31\x0b\xbe\xcc\x6d\
\x65\xe1\x4b\x45\x48\x39\x00\xbf\xff\x3e\xc0\xa8\xeb\x12\x12\xe0\
\xf1\x27\xc0\x2f\x3c\x4c\x7e\xe5\x24\xe3\x22\x47\x61\x89\x0e\xdd\
\x52\x82\x5d\x6e\x7a\x9d\x05\xb3\xe7\xc4\x70\xae\x72\x0a\x77\xde\
\x65\x44\x88\x7d\xc0\x9f\x55\x5c\x5f\x0f\x9f\x6c\x80\x9a\x0a\x13\
\xc7\x3e\x4e\x67\x6f\x5e\xf8\x6e\x17\x38\x57\x6e\x44\xd8\x56\x3c\
\xc0\xaa\x72\xf4\xc4\x7d\xbc\xf9\xd6\x68\x84\x52\x84\xa2\xfe\x0a\
\x74\x4d\x44\x9f\x0f\xf6\xe4\x43\x5e\x1e\xc4\xc5\x7a\x31\x29\xbe\
\xb0\x24\x42\xe1\xa6\x66\xc1\x93\xd9\x16\x72\x8f\xde\x4e\x8c\xcd\
\x89\xa2\x14\x00\x5d\x17\x0f\xa3\x11\xee\x1f\xeb\x65\xe2\x84\x0e\
\xfa\x45\xa8\xa8\x42\xfb\x5b\x04\x1a\x5a\xdc\x0e\xd9\xe9\x73\x07\
\x35\xf0\x78\x35\xec\x4a\x23\xb6\x41\x06\x76\xd6\x48\xa6\xce\xb3\
\x20\xc4\x41\x84\x38\xc7\x1d\x49\x7e\xd6\xac\xbe\x8a\xa6\xf9\xe9\
\xe8\xe8\x40\x7a\xdb\xf1\xfb\xfd\xbd\x26\xed\xf4\xb9\x69\x71\x3b\
\x24\xd0\xa0\x00\x25\x3e\xbf\x4f\x94\xd9\x4b\x83\x1a\x9b\x8c\x0a\
\x09\x16\x0b\xb7\x1a\xac\x48\x6f\x24\x2f\xaf\xf7\xb1\x75\xdf\x48\
\x2c\xb7\x34\xf0\xd1\x47\x47\xa9\xac\xbc\xc8\x95\x2b\x57\xa8\xaa\
\xaa\x22\x31\x71\x30\xaf\x2e\x78\xb6\x57\x02\x65\xf6\x52\xae\xed\
\x0a\x25\x0a\x5d\x1b\x0b\x05\x95\x7b\x42\x3a\x24\x98\xac\xc4\xf7\
\xb3\x32\xc4\x91\x4e\x62\x5b\x3a\xd3\x33\x86\x50\x5d\x37\x89\xc1\
\x43\x12\x70\x3a\xdb\x28\x2a\x2e\x26\x3b\x3b\x1b\xa7\xd3\xc9\x8e\
\x1d\xdb\xa8\xaa\x38\x1f\x96\x40\x40\xae\x92\xa0\xd7\xf2\x60\x68\
\xed\xf0\x12\x13\x69\x0c\xaa\x03\x38\x73\xaa\x94\xfc\xdd\x5f\x53\
\x5e\x7e\x86\x77\x57\xad\x25\x2e\xfe\xb6\xa0\x76\x37\x5e\xcb\x43\
\x2e\x26\xff\x04\x82\x2e\x26\xff\x8b\xd5\xec\x3a\xfe\xd3\xe5\x54\
\x17\xfc\xcb\xeb\xf9\x1f\x0a\x96\x9d\x91\xab\x49\xfa\x0b\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x14\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x04\x91\x49\x44\
\x41\x54\x58\x85\xcd\x97\x6b\x6c\x14\x55\x18\x86\x9f\x33\x33\xbb\
\x5d\x2a\xdd\xda\xee\x16\x0a\x04\x8a\x55\x04\x6d\xb0\xa1\xb6\x36\
\x12\x4c\x83\x09\x3f\x84\x80\x60\xd2\x6a\xc4\x08\x8d\xb7\xc4\x90\
\x80\x26\x2a\x9a\x08\x42\xa2\x21\xa0\x01\x4c\x04\x25\x5c\x02\x34\
\xfc\x68\x13\x69\x80\x12\xa4\xf1\x52\x2f\xa1\x2d\xd0\x58\xcb\xa5\
\x14\xdb\x42\x0a\xb4\x65\xd9\x85\xae\xbd\x2c\x3b\xb3\x73\xfc\xd1\
\x76\xd2\xb2\x5b\xba\xcb\xaa\xf8\x26\x93\xcc\x7c\xe7\x3d\xf3\xbe\
\xf3\x9d\x73\xe6\x9c\x4f\x48\x29\x19\x0a\xb1\x4e\x8c\x03\x8a\x80\
\x9c\x81\x2b\x9d\xf8\xd0\x01\xd4\x0d\x5c\xa5\x72\xad\xbc\x3e\x4c\
\x6f\xa8\x01\xb1\x5e\x14\x0a\x29\xb6\x49\xa4\x1b\x40\x53\x35\x99\
\xea\x70\x89\x78\xd4\x7d\x01\xaf\x34\x42\x86\x00\x10\x88\x1b\x52\
\xc8\xb7\xe5\x1a\x59\x16\x66\x40\xac\x13\xdf\x00\x6f\xda\x55\xbb\
\x5c\x95\xbf\x5a\xcc\xcb\x9c\xcf\xcc\xb4\x6c\x12\x34\x47\x3c\xfa\
\xdc\x36\x02\x34\x78\xea\xa9\x6c\x39\xca\x96\x9a\x0d\x32\x18\x0a\
\x0a\x60\x87\x5c\x2b\xdf\xb2\x0c\x88\xf5\xa2\x10\x49\xe9\xa3\xae\
\xc7\xd8\xbb\xa8\x8c\x19\xee\xac\xb8\x44\x47\x42\xe3\x8d\xb3\x2c\
\x3b\x54\x48\x93\xf7\x3c\x08\x8a\xe4\x1a\x59\x26\xf8\x84\x71\x02\
\x71\xd6\xa6\xda\x5c\x55\xaf\xd6\x89\x58\xc5\x4d\x13\x14\x25\x36\
\x13\x05\xfb\x72\xa4\x1e\xd2\xbd\x12\x99\xa5\x00\x45\x12\xe9\x5e\
\x95\xbf\x3a\x66\x71\xe8\x17\xef\xec\xd4\x01\xa8\xb8\x58\x4e\xca\
\x26\x41\xca\x26\x41\xc5\xc5\xf2\x88\xfc\x19\xee\x2c\x56\xe5\xaf\
\x16\x03\xf3\xac\x48\xa1\x7f\xa6\x33\x2f\x73\x7e\xcc\xe2\x83\x18\
\xeb\x36\xa8\xd5\x4f\xd2\x2d\x7b\xa2\xe2\x0f\xd1\xca\x51\x80\x1c\
\x4d\xd5\xe4\xcc\xb4\xec\x7b\x36\xf0\x80\x3a\x06\xb7\x39\x91\xa0\
\x6e\x8b\x8a\x3f\x33\x2d\x1b\x4d\xd5\xe4\xa0\x81\xf4\x54\x87\x4b\
\xc4\x3b\xdb\x33\x13\x26\xe1\x6d\x55\xa3\xe2\x26\x68\x0e\x06\x96\
\x77\xba\x16\x8f\xe8\x9d\xe3\xdc\xa6\x9d\xb0\xee\x4f\xb7\xd7\x84\
\xf1\x17\x4c\x5b\x1c\x16\x8b\xcb\xc0\x2b\xe5\x4b\x46\x6c\xdb\x5c\
\xb3\x21\x2c\x76\xf3\x3d\x19\x16\x8b\x61\x01\xfd\x3b\x88\x2b\x03\
\x25\x8b\x0f\x5a\xf7\x01\xa9\x53\xd5\x79\x8c\xfd\xd5\xbb\x01\x58\
\x3a\xeb\x35\xf2\x26\xe7\xe3\x52\xd2\xb8\xdb\xbf\x3c\xa2\x81\x1f\
\x2a\x21\x2f\x1f\x92\x9c\x77\x37\x30\x74\x4c\xfd\xa2\x0b\xaf\xb8\
\x66\x3d\xe7\x4c\xce\x63\xd9\xf4\x37\x46\xfd\x88\xb0\x21\x68\x3c\
\x0f\x7b\x76\xc2\xe6\x8d\xf0\xe3\xf7\xa3\xf6\xb7\xe0\x94\xc9\x4c\
\x22\xc3\x7a\xb6\x29\xd1\x25\x77\x18\xcb\xef\x87\x17\x16\x41\x47\
\x3b\x4c\x9f\x0e\xaa\x0a\xb5\x27\xe0\x9d\xf7\xc1\x6e\x8f\xfc\x82\
\x2e\xe3\x26\xba\x0e\x6a\x5f\xf2\xb0\x78\x92\x99\x12\x95\x01\x2b\
\x03\xa6\x09\x45\x4b\x7a\xf1\x79\x41\x4a\x68\x6c\x84\xed\xdb\xe1\
\x4a\x1b\x7c\xba\x16\xea\x4e\x45\x7e\x81\x8f\xeb\x78\x13\x5b\xb9\
\x20\x4e\xe3\xf1\x04\xad\xb8\x2d\xca\x0d\xc2\xca\x40\xc5\x61\x3f\
\xb5\x35\x3f\x01\x59\xc0\xc3\x00\xf4\xf4\x40\x49\x09\xe4\xe6\x82\
\xc3\x01\xbf\xfd\x0a\x2b\x56\x82\x18\x98\x55\x9e\x3e\x1f\xb7\x12\
\xdb\x70\x20\x18\xeb\x56\x58\x9a\x57\xc8\xf2\xd9\x45\x51\x09\x87\
\x65\x60\xe1\xf3\x4e\x0e\x1e\x7e\x16\x87\xe3\x32\xaa\xfa\x0b\x10\
\xb0\x48\xa7\x4e\xc1\xce\x9d\xd0\x76\x09\xd6\x7c\x08\xcd\x17\xfb\
\xe3\x6d\x5a\x33\x0e\x09\x8a\xa2\x90\x11\xcc\xc6\x66\x8b\xfd\xec\
\x32\x2c\x4f\x05\x73\xc7\x72\xe9\xda\x5c\x9e\x9e\xed\x42\x88\x4a\
\xe0\x8a\xd5\xe6\xf3\xc1\xae\x5d\xd0\xf0\x07\x94\x1e\x80\x2f\x77\
\xdf\x20\x29\xe1\x16\xa6\xa2\x60\x0b\x66\x92\xa4\xa6\xc6\x2c\x1e\
\x66\x00\xc0\x6e\x17\x1c\x3e\xf6\x38\xdb\x76\xcc\x41\xd3\xce\xa0\
\xaa\xb5\x40\xff\x76\x6b\x9a\x50\x55\x05\xfb\xf7\x83\xa7\xc9\xcd\
\x9e\x0f\x0a\xb8\xda\xe1\xc4\xdd\x37\xe9\x9e\xc4\x23\x1a\x18\xc4\
\x4b\x2f\xa7\x70\xa1\x65\x1e\x8f\x4c\xb3\x21\xc4\x71\xc0\x63\xb5\
\xb5\xb7\xc3\xd7\xdb\xe1\x72\xb3\x9d\x93\x5f\xe5\xf2\xdd\x91\xe8\
\x36\xa1\x98\x0c\x00\xa4\xba\x54\xaa\x4f\xcf\xe2\xa3\x8f\x9f\x44\
\x28\x35\x28\xea\xef\x80\x09\x80\x61\xc0\xd1\x0a\x38\x72\x04\xc6\
\xa7\xe9\xd8\x15\xe3\x9f\x37\x30\x88\x17\x8b\x9d\x1c\xa8\x9e\x4c\
\x8a\xdb\x8f\xa2\x54\x02\xb7\x00\xb0\xd9\xe0\xa9\x7c\x9d\x67\xe6\
\xf4\x32\x26\x41\x45\x15\xe6\x3d\x19\xe8\xf0\x05\xbc\xf2\xb6\x11\
\x88\x48\x08\xea\x26\x1e\xa5\x13\xf7\x44\x8d\xd2\xcb\x92\xe7\x96\
\x3b\x11\xe2\x67\x84\xb8\xc0\x43\x99\x21\xbe\xf8\xfc\x2f\x4c\x33\
\x44\x6f\x6f\x2f\x52\xef\x21\x14\x0a\x8d\x2a\x7a\xdb\x08\xe0\x0b\
\x78\x25\xd0\xa1\x00\x75\x46\xc8\x10\x0d\x9e\xfa\x88\x64\xbb\x4d\
\x61\x82\xd3\xc9\x83\x9a\x0b\xa9\x27\xb2\x72\x9b\xc1\xde\xe3\x4f\
\xe0\x4c\xee\x60\xeb\xd6\x6a\x5a\x5a\xfe\xa4\xab\xab\x8b\xd6\xd6\
\x56\x32\x32\xa6\xf0\xee\x8a\xd7\x47\x35\xd0\xe0\xa9\x67\xa0\x56\
\xa8\x53\xe8\xaf\x58\xa8\x6c\x39\x3a\x62\x87\x09\x76\x17\xe9\x63\
\x5c\x4c\xf5\xe6\x92\xd1\x9d\xcb\xc2\xd9\x53\xb9\x74\xb5\x80\x29\
\x53\x27\xe0\xf7\x77\x53\x53\x5b\x4b\x71\x71\x31\x7e\xbf\x9f\x92\
\x92\x7d\xb4\x36\x37\xdd\xd5\xc0\x10\xad\xba\xa8\x8f\xe5\x37\x7b\
\x75\x52\x12\x47\x3e\xf3\x9d\x3b\x53\x4f\xc5\xa1\x6f\x69\x6c\x3c\
\xc7\x67\x1b\xb7\x30\x3e\x3d\xf2\xd2\xbc\xf3\x58\x7e\xff\x0b\x93\
\xff\x45\x69\x36\x88\xfb\x5a\x9c\x5a\x81\xff\xb8\x3c\xff\x1b\xdb\
\xdf\xf7\x22\x98\xe1\x5d\x22\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x04\x6d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\xea\x49\x44\
\x41\x54\x58\x85\xcd\x97\x5f\x4c\x5b\x75\x14\xc7\x3f\xbf\xdb\xdb\
\x52\x99\x6c\x01\x0a\x43\xc9\xb2\xc8\x02\xcc\x20\x21\xa2\x1b\xd9\
\xd3\x7c\xc1\x25\x33\x31\xf3\x01\x8c\x51\x47\x26\x11\xcd\xb2\x44\
\x7c\xd8\xc4\x2d\x6c\xd0\x27\x0c\x11\xcd\xa2\xcb\x24\x99\x66\x8a\
\x89\x19\x0f\xa2\x86\x46\xd2\xcd\x64\x4b\x16\x64\x8e\x26\xc8\x88\
\x0e\x06\x18\x66\xb4\x85\x95\x0d\x05\x7a\x7b\x7b\xdb\x9f\x0f\x94\
\xda\xad\x40\x6f\xa9\x3a\x4f\x72\x93\x7b\xcf\xfd\xfe\xce\xf7\x7b\
\x7f\x7f\xce\x3d\x47\x48\x29\x89\x37\xd1\x2a\xf2\x81\x5a\xa0\x32\
\x7a\x15\x90\x9e\x79\x01\x4f\xf4\x3a\x27\x4f\xc8\xe9\xbb\xf8\xe2\
\x05\x08\xa7\xa8\x11\x52\x9c\x92\x48\x07\x80\x6a\x51\x65\x8e\x3d\
\x57\xa4\xc3\x3e\xab\xf9\xa5\x11\x36\x04\x80\x40\xdc\x92\x42\x1e\
\x94\xc7\x65\x77\x82\x00\xd1\x2a\x3e\x02\x1a\x6c\x16\x9b\x6c\xac\
\x6a\x12\xd5\x45\x7b\x29\xcf\xab\x20\x43\xb5\xa7\xc3\x4f\xd0\xd0\
\x18\x9e\x19\xc2\x3d\xe1\xe2\xfd\x81\x36\xa9\x87\x75\x01\x74\xca\
\x13\xf2\xb5\x98\x00\xe1\x14\x35\x48\xce\x95\xe4\x3e\xca\xd9\x67\
\xbb\xd9\xee\x28\x4b\x8b\x74\x35\xfb\xf9\xd6\x08\x75\x5f\xd7\x30\
\xea\xff\x09\x04\xb5\xf2\xb8\xec\x16\xb4\x90\x2f\x10\x23\x56\x8b\
\x35\xf7\xe2\x7e\x8f\x48\x95\x3c\x12\x01\x45\x49\x4d\xc4\xee\x4f\
\x2b\x65\x28\x1c\xf2\x4b\x64\x99\x02\xd4\x4a\xa4\xa3\xb1\xaa\x29\
\x65\x72\x58\x22\xf7\xf9\x42\x00\xf4\x8e\xf5\x90\xdd\x2e\xc8\x6e\
\x17\xf4\x8e\xf5\xac\x88\xdf\xee\x28\xa3\xb1\xaa\x49\x44\xf7\x59\
\xad\xc2\xd2\x4e\xa7\xba\x68\x6f\xca\xe4\xcb\xf6\xa0\xc3\xe0\x4a\
\xe8\x07\xe6\xe5\x82\x29\x7c\x1c\x57\xa5\x02\x54\xaa\x16\x55\x96\
\xe7\x55\xac\x5b\xc0\x06\xcb\x03\x38\x22\x0f\xa3\x87\xac\xa6\xf0\
\xe5\x79\x15\xa8\x16\x55\x2e\x0b\x28\xc8\xb1\xe7\x8a\x74\x77\x7b\
\x51\x46\x21\xfe\x49\x8b\x29\x6c\x86\x6a\x27\x7a\xbc\x0b\xd4\x74\
\x48\xef\x5d\xe7\x9b\x6a\x7f\xec\x7e\xf0\xf7\x81\x04\xfc\x33\xc5\
\xfb\x12\x7c\x69\x09\x78\xa9\xe7\xb9\x55\xdf\xbd\x37\xd0\x96\xe0\
\xbb\x7d\x58\x26\xf8\x52\x38\x40\xff\x8e\xa5\x35\x03\x5d\xfb\xbe\
\x8c\xdd\x6b\x32\xc4\x45\xdf\xb7\x7c\xf6\xfd\xc7\x00\xbc\xf8\x78\
\x3d\x3b\xb6\x54\x91\xab\xe4\xb1\x56\x2e\x4f\x4b\x40\xfc\x9a\xfe\
\x21\xe6\xf0\x8b\xdf\x62\xcf\x95\x5b\x76\x50\x57\xfa\x6a\xd2\x18\
\x29\x2d\x81\xbb\xcf\x85\xbb\xcf\xb5\xe2\xbb\x8d\x72\x13\x85\x6c\
\x8d\x3d\x5b\x15\x73\xdf\x66\x7a\x06\x74\x5d\xe7\xe8\xa1\x06\x00\
\x76\x8f\x4c\x60\xb3\xd9\x00\x98\x33\x6e\x13\x0a\x81\x25\xb0\xe9\
\x2e\x7c\x56\x24\xdb\x54\x5c\xd3\x33\x70\xfa\x64\x07\xc5\x73\x77\
\x28\x9e\xbb\xc3\xe9\x93\x1d\x31\xff\x2c\xd3\xf8\x33\x27\xb9\x2e\
\x06\x99\x99\xd1\x63\x7e\xab\xc9\x1f\x84\xa0\x05\x6f\xfe\x86\xcd\
\x9b\xaf\x1f\xf4\xae\x0a\x9a\xf6\x79\xd9\xf5\xd8\x36\xfa\x17\x17\
\x01\xd8\x95\x99\x49\xff\xb5\x71\xc4\x46\x1b\xbf\x66\x7a\xb0\x23\
\x08\x2b\x0a\xa5\xda\x53\x58\xad\xe6\xca\x87\xd2\x53\x05\x4c\x2f\
\xf8\x7c\xa6\x64\xb6\x1e\x7e\x83\x7a\xc3\xa0\x04\x28\x01\xea\x0d\
\x03\xe7\x91\x46\x6e\xaa\xe3\xd8\x25\x28\x8a\xc2\x56\xbd\xc2\x34\
\x79\xbc\x25\x15\x30\x78\xf5\x0a\xdf\xb9\xbe\xa1\x59\xff\x7b\x7a\
\x9b\x75\x9d\xf3\xbd\x5f\x31\x35\x3c\x40\x44\x51\xb0\xea\x45\x64\
\x59\x72\x52\x26\x4f\x2a\x40\x4a\xc9\x5b\xaf\xbf\x42\x9b\xa6\x91\
\x15\xe7\xcf\x02\xda\xb4\x20\x1d\x0d\xef\x32\x8f\x15\x47\xa0\x70\
\x5d\xe4\x49\x05\x7c\xf1\xf9\x59\x94\xa9\x5f\xd8\x2f\x13\x53\x68\
\x9d\x94\x64\x4c\x4d\xf3\xe3\x27\xd7\x50\x2d\xeb\x2f\x1b\x57\x15\
\x30\x3f\xff\x27\xce\x23\x6f\xf2\xc1\xc2\xc2\x8a\x99\x4c\x00\x1f\
\x2e\x2c\xd2\xf1\xf6\x51\x22\xe1\xc5\x7f\x5e\x40\xbb\xb3\x99\xa7\
\xf5\x20\x3b\xd7\x18\xbc\x13\xd8\xa3\x07\x69\x77\x36\xa7\x25\xc0\
\x3b\xab\xf9\x65\xd0\xd0\x62\xce\x1b\x63\xa3\x74\x9d\xe9\xe4\x9d\
\x40\x20\x69\x80\xb6\x40\x80\xae\x33\x9d\xdc\x18\x1b\x35\x4d\x1a\
\x34\x34\x66\x35\xbf\x04\xbc\x0a\xe0\x31\xc2\x86\x18\x9e\x19\x8a\
\x01\x8e\x1d\x6a\xa0\x29\xa4\x9b\xea\x48\x0a\x80\xa6\x90\xce\xb1\
\x68\x96\x34\x63\xc3\x33\x43\x44\x7b\x05\x8f\xca\x52\xc7\x72\xc0\
\x3d\xe1\xe2\xc9\x87\xaa\x70\xf7\xb9\xb8\x70\xf9\x12\x4f\x48\x49\
\x8b\xc9\x80\x7a\x38\xcc\x85\xcb\x97\x70\xf7\xb9\xa8\xde\x93\xbc\
\xb6\x74\x4f\xc4\xfe\x27\x9e\x84\xb2\x7c\xc8\xed\x61\x72\x62\x3c\
\x61\x90\x16\x8a\x60\xb7\xae\x9d\x36\x1e\x29\xda\xc6\xf3\x2f\xbc\
\xbc\x26\xe6\xde\xb2\xfc\xfe\x37\x26\xff\x8b\xd6\x6c\xd9\xee\x6b\
\x73\x1a\x73\xfc\xc7\xed\xf9\x5f\x29\x44\xcb\x09\xd0\x3d\xca\x90\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x11\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\x8e\x49\x44\
\x41\x54\x58\x85\xcd\x97\x5f\x68\x5b\x55\x1c\xc7\x3f\x27\xb9\x69\
\x33\xf7\x50\x5d\x92\x91\xe9\x5b\x1f\x64\x5a\x4a\xbb\x4c\x8d\x8e\
\xa1\x3e\x98\x81\x2d\x53\x28\x4d\x41\x1f\x16\x64\xa0\xd0\x97\x96\
\x0a\xa3\x4f\xab\xeb\x93\x04\x2c\x6e\x0f\x03\x1d\x7b\x58\x1f\x7c\
\x58\x47\xc7\x3a\x28\x74\x45\xac\x65\xeb\xdc\x4b\xb6\x5a\x8a\x63\
\x0f\xf5\x69\xa3\x24\xb9\xd5\xb2\x3f\xa4\xc9\x4d\x7f\x3e\x9c\x64\
\xb6\xc9\xbd\x37\x89\x9d\xba\x2f\x1c\x38\x9c\xfb\x3b\xdf\xef\xf7\
\xde\x7b\xce\xf9\xfd\x8e\x12\x11\xb6\x42\x9d\x52\x7b\x81\x3e\x20\
\x52\x6a\x61\x76\x86\x55\x20\x55\x6a\x17\x65\x44\xd2\xdb\xf4\xb6\
\x1a\x50\xa3\x2a\xae\x44\x9d\x15\x24\x08\x60\x78\x0d\xd9\xe3\x0f\
\xa8\x9d\xa8\xaf\xe5\x4c\xb1\x8a\x96\x02\x50\xa8\xac\x28\xe9\x97\
\x93\x32\x51\x65\x40\x9d\x52\xdf\x01\x9f\x37\x79\x9b\x64\x30\x3a\
\xac\x62\xad\x5d\xb4\x87\x3a\x68\x36\xfc\x3b\xd1\x67\xc3\xca\xb1\
\x94\x59\x64\x76\x65\x9a\x6f\x6f\x7d\x2d\xf9\x62\x5e\x01\xdf\xcb\
\x88\x7c\xf1\xd4\x80\x1a\x55\x71\x84\x8b\xaf\x06\x5e\xe3\xc2\x47\
\x13\xec\x0f\xb6\xed\x48\xd4\x09\x77\xb3\xcb\x24\xa6\xe2\xdc\x33\
\x7f\x03\x45\x9f\x9c\x94\x09\xc5\x57\xec\x55\xa8\x65\x9f\xd7\x17\
\xf8\xf9\x58\x4a\x35\x2a\xbe\xb9\x09\x1e\x4f\x63\x26\xde\x1b\x8f\
\x48\xa1\x58\x30\x05\x69\x33\x80\x3e\x41\x82\x83\xd1\xe1\x9a\x6f\
\xbe\xbe\x0e\x0b\xd7\xe1\xce\xed\x52\x4b\xe9\xf1\xd7\xdb\x84\xb7\
\xde\x56\x74\x1e\x80\x43\x87\xa1\xa5\xc5\x99\x63\x7f\xb0\x8d\xc1\
\xe8\xb0\x4a\x2e\x8c\x06\x81\x3e\x03\xbd\xd2\x89\xb5\x76\xb9\x8a\
\x5f\xbd\x02\x43\x03\x90\xcd\x54\x3f\x4b\xa7\x15\x73\x3f\xe9\x7e\
\x30\x04\x63\xa7\xe1\xe8\xc7\xce\x5c\xb1\xd6\x2e\x92\x0b\xa3\x00\
\x11\x0f\x10\x31\xbc\x86\xb4\x87\x3a\x6c\x83\x4d\x13\x8e\x27\xe0\
\xd8\xa7\xf6\xe2\x95\xc8\x66\x74\xec\xf1\x84\x9e\x6b\x87\xf6\x50\
\x07\x86\xd7\x90\xb2\x81\xf0\x1e\x7f\x40\xd9\xad\x76\xd3\x84\x77\
\xdf\x81\xc9\x4b\xb5\x85\x2b\x31\x79\x49\xcf\xb5\x33\xd1\x6c\xf8\
\x29\x6d\xef\xb0\xeb\xf2\x39\x31\x04\x0f\xee\x37\x2e\x5e\xc6\x83\
\xfb\x70\xe2\x4b\xf7\x18\x47\x03\x57\xaf\xb8\xbf\xf9\xee\xdd\x30\
\x33\x03\xd7\xae\xe9\xbe\x13\x26\x27\x34\x57\x43\x06\xd6\xd7\xf5\
\x82\x73\x43\x34\x0a\x47\x8e\x40\x2c\x06\x07\x0f\xba\xc7\x0e\x0d\
\x68\x4e\x3b\x18\x76\x83\x0b\xd7\x6b\x2f\xb8\xf9\x79\x38\x73\x06\
\x44\xe0\xc6\x0d\xf7\xd8\x6c\x46\x73\x7e\xd8\x5d\xa7\x81\x3b\xb7\
\xdd\x09\x01\x2c\x0b\x06\x6a\x7c\xa5\x4a\x4e\x3b\x03\xb6\xbf\xa0\
\x96\x81\x40\x00\x2e\x5f\x86\x5c\x4e\xb7\x68\xb4\x3e\x03\x76\xb0\
\x37\x90\x72\x27\x33\x4d\x38\x77\x0e\x9a\x9b\x75\xab\xe7\x28\x76\
\xe2\x6c\xe0\x14\xdf\x8e\x47\x8f\xfe\xe9\xcc\x3a\x0c\x74\x46\x9e\
\x0d\x79\x3d\x9c\xf6\x06\x0e\xfc\x0b\x06\x1c\x38\x9f\x4f\x03\x87\
\x0e\xeb\xac\xe6\x06\x63\xcb\x06\xf6\xf9\xdc\x63\x83\x21\xcd\x59\
\xb7\x81\x96\x16\x9d\x52\x9d\xe0\xf3\xe9\x13\xb0\x8c\xee\x6e\x77\
\x13\x63\xa7\x9d\x6b\x04\xdb\x83\x08\x74\x3e\xef\xe9\xb5\xcf\x07\
\x7e\xbf\xce\x03\x33\x33\x7f\x8f\xed\xda\x05\x85\x42\x75\x6c\x4f\
\xdc\xbd\x36\x70\x34\x00\x90\x1c\x83\x5f\x6e\x56\x67\xc4\x87\x0f\
\x61\x6e\xce\x6d\xa6\xc6\xcb\xaf\x40\xf2\x1b\xf7\x18\x0f\xb0\xba\
\x96\x33\x65\xc3\xca\x55\x3d\x0c\x04\xe0\xc7\xf9\x4d\xde\x3f\xfa\
\xb8\xb6\x5a\x05\x7a\x7a\x61\xfe\xa6\xe6\xa8\xc4\x86\x95\x63\x2d\
\x67\x0a\xb0\xea\x01\x52\x56\xd1\x52\x4b\x99\x45\x5b\xa2\x70\xd8\
\xc3\xd9\xf1\x1c\xc9\xf3\x7f\xf2\x62\xc8\xaa\x29\x1c\x0c\xc1\xf8\
\x0f\x70\xfe\x82\xbd\x38\xc0\x52\x66\x91\xd2\x5d\x21\x65\xa0\x6f\
\x2c\x9f\xcd\xae\x4c\xf3\xc6\x3e\xfb\x43\x7d\x5f\x53\x80\x4f\x7a\
\x0b\x7c\xf0\xa6\x97\x5b\xbf\xe6\xf9\x7d\xb9\x69\x5b\x51\xda\x19\
\xd1\xdb\xac\x9e\xa2\x14\x60\x76\x65\xba\xdc\x4d\xd5\x5d\x96\xff\
\xf1\xa4\xc0\x4b\x2f\xd4\xd8\x6f\x75\xa0\xb2\x2c\xf7\xc8\x88\xa4\
\x45\x49\x7f\xbe\x98\x57\x89\xa9\x38\x77\xb3\xcb\xb6\x13\x9f\x95\
\x78\x62\x2a\x4e\xbe\x98\x57\xa2\xa4\x5f\x46\x24\xfd\x7c\x5c\xcd\
\xca\xf8\x5f\x2f\xa7\x4f\x07\xfe\xe3\xeb\xf9\x5f\x3d\x2c\x89\x34\
\xbf\x76\x48\x41\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0f\
\x07\x2d\x6a\xa2\
\x00\x47\
\x00\x61\x00\x7a\x00\x65\x00\x74\x00\x74\x00\x65\x00\x65\x00\x72\x00\x45\x00\x64\x00\x69\x00\x74\x00\x6f\x00\x72\
\x00\x0f\
\x0d\x8e\x9e\xa7\
\x00\x61\
\x00\x64\x00\x64\x00\x73\x00\x65\x00\x6c\x00\x65\x00\x63\x00\x74\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x0d\xa7\x3c\x47\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x6e\x00\x6f\x00\x64\x00\x65\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x08\xcd\x35\xc7\
\x00\x73\
\x00\x65\x00\x61\x00\x72\x00\x63\x00\x68\x00\x70\x00\x6f\x00\x69\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x04\x01\x98\x27\
\x00\x61\
\x00\x64\x00\x6d\x00\x69\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x0d\x8e\xab\x07\
\x00\x64\
\x00\x65\x00\x6c\x00\x73\x00\x65\x00\x6c\x00\x65\x00\x63\x00\x74\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x05\xb2\x40\x67\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x73\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x0f\xaa\x6d\x27\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x63\x00\x61\x00\x6e\x00\x63\x00\x65\x00\x6c\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x08\xb6\x25\x27\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x73\x00\x68\x00\x69\x00\x66\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x04\x21\x36\x27\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x6e\x00\x65\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x0b\x6e\xcc\xc7\
\x00\x6e\
\x00\x65\x00\x77\x00\x66\x00\x65\x00\x61\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0c\x33\x5a\x87\
\x00\x68\
\x00\x65\x00\x6c\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x0c\x00\x00\x00\x03\
\x00\x00\x00\xba\x00\x00\x00\x00\x00\x01\x00\x00\x18\x1b\
\x00\x00\x01\x56\x00\x00\x00\x00\x00\x01\x00\x00\x34\xe0\
\x00\x00\x00\xf6\x00\x00\x00\x00\x00\x01\x00\x00\x21\xfb\
\x00\x00\x01\x36\x00\x00\x00\x00\x00\x01\x00\x00\x2e\xb1\
\x00\x00\x00\x7c\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x17\
\x00\x00\x00\xa4\x00\x00\x00\x00\x00\x01\x00\x00\x11\xe6\
\x00\x00\x01\x72\x00\x00\x00\x00\x00\x01\x00\x00\x39\xf8\
\x00\x00\x01\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x3e\x69\
\x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xd2\x00\x00\x00\x00\x00\x01\x00\x00\x1d\x15\
\x00\x00\x00\x5c\x00\x00\x00\x00\x00\x01\x00\x00\x04\xf8\
\x00\x00\x01\x14\x00\x00\x00\x00\x00\x01\x00\x00\x27\xf3\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
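# --- Illustrative usage sketch (not part of the pyrcc-generated output above) ---
# Once qInitResources() has run, the embedded PNGs are reachable through Qt's
# ":/" resource scheme. The path below is decoded from the UTF-16 entries in
# qt_resource_name ("plugins", "GazetteerEditor", "icon.png"); treat the exact
# prefix as an assumption. QtCore is the binding already imported by this module.
_icon_registered = QtCore.QFile(":/plugins/GazetteerEditor/icon.png").exists()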
| 61.486284
| 129
| 0.724732
|
b4adf53face319e88f2f538899fa89bd95dcb535
| 4,879
|
py
|
Python
|
tests/kafkatest/tests/copycat_test.py
|
yobennett/kafka
|
e582447adb4708731aff74aa294e7ce2b30b0a41
|
[
"Apache-2.0"
] | null | null | null |
tests/kafkatest/tests/copycat_test.py
|
yobennett/kafka
|
e582447adb4708731aff74aa294e7ce2b30b0a41
|
[
"Apache-2.0"
] | null | null | null |
tests/kafkatest/tests/copycat_test.py
|
yobennett/kafka
|
e582447adb4708731aff74aa294e7ce2b30b0a41
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.copycat import CopycatStandaloneService
from kafkatest.services.console_consumer import ConsoleConsumer
from ducktape.utils.util import wait_until
from ducktape.mark import parametrize
import hashlib, subprocess, json
class CopycatStandaloneFileTest(KafkaTest):
"""
Simple test of Copycat that produces data from a file in one Copycat
standalone process and consumes it on another, validating the output is
identical to the input.
"""
INPUT_FILE = "/mnt/copycat.input"
OUTPUT_FILE = "/mnt/copycat.output"
OFFSETS_FILE = "/mnt/copycat.offsets"
TOPIC = "test"
FIRST_INPUT_LIST = ["foo", "bar", "baz"]
FIRST_INPUT = "\n".join(FIRST_INPUT_LIST) + "\n"
SECOND_INPUT_LIST = ["razz", "ma", "tazz"]
SECOND_INPUT = "\n".join(SECOND_INPUT_LIST) + "\n"
SCHEMA = { "type": "string", "optional": False }
def __init__(self, test_context):
super(CopycatStandaloneFileTest, self).__init__(test_context, num_zk=1, num_brokers=1, topics={
'test' : { 'partitions': 1, 'replication-factor': 1 }
})
self.source = CopycatStandaloneService(test_context, self.kafka, [self.INPUT_FILE, self.OFFSETS_FILE])
self.sink = CopycatStandaloneService(test_context, self.kafka, [self.OUTPUT_FILE, self.OFFSETS_FILE])
self.consumer_validator = ConsoleConsumer(test_context, 1, self.kafka, self.TOPIC, consumer_timeout_ms=1000)
@parametrize(converter="org.apache.kafka.copycat.json.JsonConverter", schemas=True)
@parametrize(converter="org.apache.kafka.copycat.json.JsonConverter", schemas=False)
@parametrize(converter="org.apache.kafka.copycat.storage.StringConverter", schemas=None)
def test_file_source_and_sink(self, converter="org.apache.kafka.copycat.json.JsonConverter", schemas=True):
assert converter != None, "converter type must be set"
# Template parameters
self.key_converter = converter
self.value_converter = converter
self.schemas = schemas
# These need to be set
self.source.set_configs(self.render("copycat-standalone.properties"), self.render("copycat-file-source.properties"))
self.sink.set_configs(self.render("copycat-standalone.properties"), self.render("copycat-file-sink.properties"))
self.source.start()
self.sink.start()
# Generating data on the source node should generate new records and create new output on the sink node
self.source.node.account.ssh("echo -e -n " + repr(self.FIRST_INPUT) + " >> " + self.INPUT_FILE)
wait_until(lambda: self.validate_output(self.FIRST_INPUT), timeout_sec=60, err_msg="Data added to input file was not seen in the output file in a reasonable amount of time.")
# Restarting both should result in them picking up where they left off,
# only processing new data.
self.source.restart()
self.sink.restart()
self.source.node.account.ssh("echo -e -n " + repr(self.SECOND_INPUT) + " >> " + self.INPUT_FILE)
wait_until(lambda: self.validate_output(self.FIRST_INPUT + self.SECOND_INPUT), timeout_sec=60, err_msg="Sink output file never converged to the same state as the input file")
# Validate the format of the data in the Kafka topic
self.consumer_validator.run()
expected = json.dumps([line if not self.schemas else { "schema": self.SCHEMA, "payload": line } for line in self.FIRST_INPUT_LIST + self.SECOND_INPUT_LIST])
decoder = (json.loads if converter.endswith("JsonConverter") else str)
actual = json.dumps([decoder(x) for x in self.consumer_validator.messages_consumed[1]])
assert expected == actual, "Expected %s but saw %s in Kafka" % (expected, actual)
def validate_output(self, value):
try:
output_hash = list(self.sink.node.account.ssh_capture("md5sum " + self.OUTPUT_FILE))[0].strip().split()[0]
return output_hash == hashlib.md5(value).hexdigest()
except subprocess.CalledProcessError:
return False
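# Standalone sketch of the check validate_output() performs: the sink node's
# `md5sum` output is compared against a digest of the expected text computed
# locally. (On Python 3 hashlib.md5 needs bytes, hence the encode; the system
# tests above target a Python 2 interpreter where the plain str also works.)
_expected_first_md5 = hashlib.md5(CopycatStandaloneFileTest.FIRST_INPUT.encode("utf-8")).hexdigest()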
| 51.357895
| 182
| 0.71613
|
a758c3c4ceeeb41e01cf1006cf4decae59ae3a11
| 397
|
py
|
Python
|
data/dls/update_xyz.py
|
wjm41/soapgp
|
ef57cebb7413abb96b54983141e188dff5166d03
|
[
"MIT"
] | 18
|
2020-05-02T19:50:31.000Z
|
2022-02-11T16:07:52.000Z
|
data/dls/update_xyz.py
|
SuperXiang/soapgp
|
ef57cebb7413abb96b54983141e188dff5166d03
|
[
"MIT"
] | 1
|
2020-11-09T20:47:43.000Z
|
2020-11-16T21:01:35.000Z
|
data/dls/update_xyz.py
|
SuperXiang/soapgp
|
ef57cebb7413abb96b54983141e188dff5166d03
|
[
"MIT"
] | 9
|
2020-11-22T17:23:29.000Z
|
2022-02-16T05:47:06.000Z
|
import pandas as pd
import sys
smiles_name = sys.argv[1]+'.can'
xyz_name = sys.argv[1]+'.xyz'
SMILES_df = pd.read_csv(smiles_name, header=0, names=['smiles','name'])
i=0
xyz_file = open(xyz_name,'r')
for line in xyz_file:
if line=='\n':
myrow = SMILES_df.iloc[i]
line = 'smiles="'+myrow['smiles']+'" tag="'+str(myrow['name'])+'" \n'
i+=1
sys.stdout.write(line)
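# Usage sketch (hedged; NAME is a placeholder): with NAME.can holding comma-separated
# "smiles,name" rows and NAME.xyz using blank lines as frame separators, run
#     python update_xyz.py NAME > NAME_tagged.xyz
# Each blank line is rewritten as a smiles="..." tag="..." comment built from the
# matching .can row, exactly as in the loop above.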
| 22.055556
| 77
| 0.607053
|
4ed7315ceb9c437589df2e86605242f991771a9a
| 6,249
|
py
|
Python
|
vismrc.py
|
lqhuang/SOD-xfel
|
8d4fd0cd18bb9417eea682987eeea19542920620
|
[
"Python-2.0",
"OLDAP-2.7",
"OLDAP-2.8"
] | 1
|
2017-01-18T14:55:40.000Z
|
2017-01-18T14:55:40.000Z
|
vismrc.py
|
lqhuang/SOD-cryoem
|
5246f1f37c961234c68a1155ac91485935c293a4
|
[
"Python-2.0",
"OLDAP-2.7",
"OLDAP-2.8"
] | null | null | null |
vismrc.py
|
lqhuang/SOD-cryoem
|
5246f1f37c961234c68a1155ac91485935c293a4
|
[
"Python-2.0",
"OLDAP-2.7",
"OLDAP-2.8"
] | null | null | null |
#!/usr/bin/env python2
from __future__ import print_function, division
# First, and before importing any Enthought packages, set the ETS_TOOLKIT
# environment variable to qt4, to tell Traits that we will use Qt.
import sys
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
# By default, the PySide binding will be used. If you want the PyQt bindings
# to be used, you need to set the QT_API environment variable to 'pyqt'
#os.environ['QT_API'] = 'pyqt'
# To be able to use PySide or PyQt4 and not run in conflicts with traits,
# we need to import QtGui and QtCore from pyface.qt
from pyface.qt import QtGui, QtCore
# Alternatively, you can bypass this line, but you need to make sure that
# the following lines are executed before the import of PyQT:
# import sip
# sip.setapi('QString', 2)
from traits.api import HasTraits, Range, Instance, on_trait_change
from traitsui.api import View, Item, Group
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, SceneEditor
from mayavi import mlab
# from qtvis import *
from cryoio import mrc
import cryoem
from visualizer import plot_density
import numpy as np
class MrcVisualization(HasTraits):
# FIXME: adjust contour level of density map
level = Range(0, 100, 20) # mode='spinner'
scene = Instance(MlabSceneModel, ())
density_plot = Instance(PipelineBase)
# the layout of the dialog created
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=250, width=300, show_label=False),
Group('level'),
resizable=True # We need this to resize with the parent widget
)
alignedM = None
color=(0.75, 0.75, 0.75)
opacity=1
@on_trait_change('level,scene.activated')
def update_plot(self):
# This function is called when the view is opened. We don't
# populate the scene when the view is not yet open, as some
# VTK features require a GLContext.
# We can do normal mlab calls on the embedded scene.
# self.scene.mlab.test_points3d()
if self.alignedM is None:
pass
else:
if self.density_plot is None:
self.density_plot = self.plot_density(self.alignedM)
else:
# FIXME: update plot with specific level of contour
pass
def plot_density(self, s, level=0.2, ret_contour=False):
self.scene.mlab.gcf().scene.background = (1,1,1)
self.scene.mlab.gcf().scene.foreground = (0,0,0)
src = self.scene.mlab.pipeline.scalar_field(s)
mins = s.min()
ptps = s.ptp()
curr_contour = mins + level * ptps
if ret_contour:
return src, curr_contour
else:
density_plot = self.scene.mlab.pipeline.iso_surface(src, contours=[curr_contour,],
opacity=self.opacity, color=self.color)
return density_plot
def setup(self, alignedM):
self.alignedM = alignedM
class MayaviQWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.curr_layout = QtGui.QVBoxLayout(self)
self.curr_layout.setContentsMargins(0,0,0,0)
self.curr_layout.setSpacing(0)
def setup(self, alignedM, filename=None):
if filename:
label = QtGui.QLabel(self)
label.setText("Model: {}".format(filename))
label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignHCenter)
self.curr_layout.addWidget(label)
self.visualization = MrcVisualization()
self.visualization.setup(alignedM)
# If you want to debug, beware that you need to remove the Qt
# input hook.
#QtCore.pyqtRemoveInputHook()
#import pdb ; pdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# The edit_traits call will generate the widget to embed.
self.ui = self.visualization.edit_traits(parent=self,
kind='subpanel').control
self.curr_layout.addWidget(self.ui)
self.ui.setParent(self)
class MRCVisualizerQWidget(QtGui.QWidget):
def __init__(self, parent=None, mrcfiles=[]):
QtGui.QWidget.__init__(self, parent)
Ms = [mrc.readMRC(mrcfile) for mrcfile in mrcfiles]
if len(mrcfiles) < 6:
hbox = True
layout = QtGui.QHBoxLayout(self)
else:
hbox = False
layout = QtGui.QGridLayout(self)
maxcol = int(len(mrcfiles) / 3) + 1
layout.setContentsMargins(0,0,0,0)
layout.setSpacing(0)
for i, M in enumerate(Ms):
filename = os.path.basename(mrcfiles[i])
self.splitter_main_bottom = QtGui.QSplitter(self)
if hbox:
layout.addWidget(self.splitter_main_bottom)
else:
row, col = np.unravel_index(i, (3, maxcol))
layout.addWidget(self.splitter_main_bottom, row, col)
self.splitter_main_bottom.setOrientation(QtCore.Qt.Horizontal)
# self.sliceplot_widget = SlicePlotQWidget()
# self.splitter_main_bottom.addWidget(self.sliceplot_widget)
self.densityplot_widget = MayaviQWidget()
self.splitter_main_bottom.addWidget(self.densityplot_widget)
self.alignedM,self.R = cryoem.align_density(M, upsamp=1.0)
self.densityplot_widget.setup(self.alignedM, filename=filename)
# self.sliceplot_widget.setup(M, self.R)
if __name__ == "__main__":
# Don't create a new QApplication, it would unhook the Events
# set by Traits on the existing QApplication. Simply use the
# '.instance()' method to retrieve the existing one.
app = QtGui.QApplication.instance()
print(sys.argv)
if len(sys.argv) >= 2:
mrcfiles = sys.argv[1:]
else:
assert False, 'Need mrc file as argument'
container = MRCVisualizerQWidget(mrcfiles=mrcfiles)
window = QtGui.QMainWindow()
window.setWindowTitle("CryoEM MRC Visualizer")
window.setCentralWidget(container)
window.show()
# Start the main event loop.
app.exec_()
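# Independent check of the contour-level mapping used in plot_density() above:
# `level` is a fraction of the data range, so a field spanning [0, 10] with
# level=0.2 yields an iso-surface value of 2.0. Runs silently at import time.
_s = np.linspace(0.0, 10.0, 101)
assert np.isclose(_s.min() + 0.2 * (_s.max() - _s.min()), 2.0)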
| 34.910615
| 94
| 0.645703
|
06d1fb4885cdc6be18ffb6b255e1451edf04b0bd
| 20
|
py
|
Python
|
simplenmt/__init__.py
|
hannlp/SimpleNMT
|
c071df13cbdb2885a8d0080fa73d412c86f5226a
|
[
"MIT"
] | 21
|
2021-03-08T03:46:00.000Z
|
2022-03-07T11:30:19.000Z
|
simplenmt/__init__.py
|
hannlp/SimpleNMT
|
c071df13cbdb2885a8d0080fa73d412c86f5226a
|
[
"MIT"
] | 2
|
2021-11-24T03:17:35.000Z
|
2021-12-16T08:13:49.000Z
|
simplenmt/__init__.py
|
hannlp/SimpleNMT
|
c071df13cbdb2885a8d0080fa73d412c86f5226a
|
[
"MIT"
] | 2
|
2021-03-13T04:58:41.000Z
|
2021-09-15T03:01:50.000Z
|
__version__ = "0.2"
| 10
| 19
| 0.65
|
dca708b3ec8fbc478564eaed57004202572b084a
| 1,452
|
py
|
Python
|
venv/lib/python3.6/site-packages/phonenumbers/data/region_UY.py
|
exdeam/opencrm
|
dfdcfdf99f0b42eb3959171927cb6574583f5ee0
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/phonenumbers/data/region_UY.py
|
exdeam/opencrm
|
dfdcfdf99f0b42eb3959171927cb6574583f5ee0
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/phonenumbers/data/region_UY.py
|
exdeam/opencrm
|
dfdcfdf99f0b42eb3959171927cb6574583f5ee0
|
[
"MIT"
] | null | null | null |
"""Auto-generated file, do not edit by hand. UY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_UY = PhoneMetadata(id='UY', country_code=598, international_prefix='0(?:0|1[3-9]\\d)',
general_desc=PhoneNumberDesc(national_number_pattern='(?:[249]\\d\\d|80)\\d{5}|9\\d{6}', possible_length=(7, 8), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:2\\d|4[2-7])\\d{6}', example_number='21231234', possible_length=(8,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='9[1-9]\\d{6}', example_number='94231234', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='80[05]\\d{4}', example_number='8001234', possible_length=(7,)),
premium_rate=PhoneNumberDesc(national_number_pattern='90[0-8]\\d{4}', example_number='9001234', possible_length=(7,)),
preferred_international_prefix='00',
national_prefix='0',
preferred_extn_prefix=' int. ',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['8|90'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[24]']),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1')])
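# Usage sketch (run from application code, not from inside this generated module,
# to avoid import cycles); "21231234" is the fixed-line example_number above:
#     import phonenumbers
#     n = phonenumbers.parse("21231234", "UY")
#     phonenumbers.format_number(n, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
# The exact formatted string follows the (\d{4})(\d{4}) pattern for leading [24].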
| 85.411765
| 162
| 0.704545
|
105f50bd4c53fb384ea6a6c8dbd4e28b9b2b59f3
| 2,217
|
py
|
Python
|
database.py
|
3anga/brianbot
|
3eb6192409beb2d6998a64a6b4d865293e751c95
|
[
"Unlicense"
] | null | null | null |
database.py
|
3anga/brianbot
|
3eb6192409beb2d6998a64a6b4d865293e751c95
|
[
"Unlicense"
] | null | null | null |
database.py
|
3anga/brianbot
|
3eb6192409beb2d6998a64a6b4d865293e751c95
|
[
"Unlicense"
] | null | null | null |
import sqlite3
class Database:
def __init__(self, **kw):
self.db = kw['db']
self.connection = sqlite3.connect(kw['db'],
isolation_level=None,
check_same_thread=False)
self.cursor = self.connection.cursor()
def __del__(self):
self.cursor.close()
self.connection.close()
def __str__(self):
return f"<Database '{self.db}'>"
def profileExists(self, DISCORDID = None):
if self.getProfile(DISCORDID) is None: return False
return True
def getProfile(self, DISCORDID = None):
with self.connection:
PROFILE = self.cursor.execute(f"SELECT * FROM profiles WHERE discordId={DISCORDID}").fetchone()
if PROFILE is None: return None
return {
'discordId': PROFILE[0],
'hashSource': PROFILE[1],
'vkId': PROFILE[2],
'msgCount': PROFILE[3],
'level': PROFILE[4],
'isPatriot': PROFILE[5],
'isMilitary': PROFILE[6],
'isBanned': PROFILE[7],
'warns': PROFILE[8],
'dateCreated': str(PROFILE[9])
}
def updateProfile(self, DISCORDID = None, NEWDATA = None):
if self.profileExists(DISCORDID) is True:
# comma-join directly; the old "compare with the last value" trick dropped
# a comma whenever an earlier value happened to equal the last one
set_ = ",".join(f"{key}={value}" for key, value in NEWDATA.items())
with self.connection:
return self.cursor.execute(f"UPDATE profiles SET {set_} WHERE discordId={DISCORDID}")
else:
return None
def createProfile(self, **kw):
if self.profileExists(kw['discordId']) is False:
# one "?" placeholder per column, comma-joined (same missing-comma fix as above)
values = ",".join("?" for _ in kw)
with self.connection:
return self.cursor.execute(f"INSERT INTO profiles ({','.join(list(kw.keys()))}) VALUES ({values})", tuple(kw.values()))
else: return None
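# Hedged, self-contained usage sketch; only runs when this file is executed directly.
# The profiles schema below is inferred from the column order getProfile() unpacks
# and may differ from the bot's real database.
if __name__ == "__main__":
    _conn = sqlite3.connect("example_profiles.db")
    _conn.execute("CREATE TABLE IF NOT EXISTS profiles (discordId INTEGER, hashSource TEXT, vkId INTEGER, msgCount INTEGER, level INTEGER, isPatriot INTEGER, isMilitary INTEGER, isBanned INTEGER, warns INTEGER, dateCreated TEXT)")
    _conn.commit()
    _conn.close()
    _db = Database(db="example_profiles.db")
    if not _db.profileExists(123):
        _db.createProfile(discordId=123, hashSource="h", vkId=0, msgCount=0, level=1, isPatriot=0, isMilitary=0, isBanned=0, warns=0, dateCreated="2021-01-01")
    print(_db.getProfile(123))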
| 36.344262
| 135
| 0.520523
|
00fcdf6699f093f7a1ccfb5b214012c386989813
| 6,346
|
py
|
Python
|
kolab/tokibi/verb.py
|
roy029/kolab
|
10a3054da5e7c96c575de1336056eee65368c087
|
[
"MIT"
] | null | null | null |
kolab/tokibi/verb.py
|
roy029/kolab
|
10a3054da5e7c96c575de1336056eee65368c087
|
[
"MIT"
] | 1
|
2021-11-14T05:38:27.000Z
|
2021-11-14T05:38:27.000Z
|
kolab/tokibi/verb.py
|
roy029/kolab
|
10a3054da5e7c96c575de1336056eee65368c087
|
[
"MIT"
] | 7
|
2020-11-02T13:05:44.000Z
|
2022-01-09T11:06:04.000Z
|
from sys import setrecursionlimit
from janome.tokenizer import Tokenizer
# Verb
class Verb(object):
base: str
vpos: str
mode: int
def __init__(self, base, mode, vpos=None):
self.base = base
self.mode = mode
self.vpos = vpos
# VPOS タイプ
VS = 'VS' # サ変
VZ = 'VZ' # サ変
VK = 'VK' # カ変
V1 = 'V1' # 上一段、下一段
VK5 = 'VK5' # カ行五段活用
VS5 = 'VS5' # サ行五段活用
VT5 = 'VT5' # タ行五段活用
VN5 = 'VN5' # ナ行五段活用
VM5 = 'VM5' # マ行五段活用
VR5 = 'VR5' # ラ行五段活用
VW5 = 'VW5' # ワ行五段活用
VG5 = 'VG5' # ガ行五段活用
VB5 = 'VB5' # バ行五段活用
ADJ = 'ADJ' # 形容詞
NA = 'NA' # 形容動詞 立派だ
janome = Tokenizer()
Mecab = {
'一段': V1,
'サ変・スル': VS,
'五段・カ行イ音便': VK5,
'五段・サ行': VS5,
'五段・タ行': VT5,
'五段・ナ行': VN5,
'五段・ワ行': VW5,
'五段・ワ行促音便': VW5,
'五段・マ行': VM5,
'五段・ラ行': VR5,
'五段・ガ行': VG5,
'五段・バ行': VB5,
'サ変・−ズル': VZ,
}
def detect_vpos(s):
for tok in janome.tokenize(s): print(tok)  # debug dump of the raw tokenization
toks = [tok for tok in janome.tokenize(s)][::-1]
vpos = None
base = None
prefix=''
for t in toks:
pos = t.part_of_speech
if base is None and pos.startswith('動詞'):
vpos = Mecab.get(t.infl_type, t.infl_type)
base = t.base_form
continue
elif base is None and pos.startswith('形容詞'):
vpos = ADJ
base = t.base_form
continue
if base is not None:
prefix = t.surface + prefix
print(s, vpos, base, prefix)
return vpos, base, prefix
# mode
基本形 = 0
未然形 = 1 << 0
連用形 = 1 << 1
仮定形 = 1 << 2
命令形 = 1 << 3
接続形 = 1 << 4
過去形 = 1 << 5
否定形 = 1 << 6
丁寧形 = 1 << 7
できる = 1 << 8
させる = 1 << 9
せる = 1 << 10
れる = 1 << 11
される = 1 << 12
# 補助語
# 行って/くる ・話して/いる
# ・歌って/ほしい ・遊んで/もらう
# ・書いて/おく ・読んで/みる
# ・寝て/しまう ・置いて/ある
みる = 1 << 16
欲しい = 1 << 17
おく = 1 << 18
くる = 1 << 19
いく = 1 << 19
ある = 1 << 20
いる = 1 << 21
VSHIFT = 32
MODES = {
'丁寧形': 丁寧形,
'できる': できる,
'させる': させる,
'せる': せる,
'れる': れる,
'未然形': 未然形,
'過去形': 過去形,
'否定形': 否定形,
'接続形': 接続形,
}
def detect_mode(s):
toks = [str(tok) for tok in janome.tokenize(s)]
mode = 0
for t in toks:
if '未然形' in t:
mode |= 未然形
elif '連用形' in t:
mode |= 連用形
elif '仮定形' in t:
mode |= 仮定形
elif '特殊・タ' in t:
mode |= 過去形
elif '接続助詞' in t and 'て,テ,テ' in t:
mode |= 接続形
elif 'させる' in t: # 動詞,接尾,*,*,一段,基本形,させる,サセル,サセル
mode = させる
elif 'せる' in t: #動詞,接尾,*,*,一段,基本形,せる,セル,セル
mode = せる
elif 'れる' in t: #動詞,接尾,*,*,一段,基本形,れる,レル,レル
mode = れる
# elif 'できる' in t: # 動詞,自立,*,*,一段,連用形,できる,デキ,デキ
# mode = できる
elif '特殊・マス' in t:
mode = 丁寧形
elif '特殊・ナイ' in t:
mode = 否定形
elif '不変化型,基本形,ん,ン,ン' in t:
mode |= 否定形
elif mode & 接続形 == 接続形 and '一段' in t and ',みる,' in t:
# 動詞,非自立,*,*,一段,基本形,みる,ミル,ミル
# 動詞,非自立,*,*,一段,連用形,みる,ミ,ミ
mode = (mode << VSHIFT) | みる
detect_vpos(s)
print(mode)
return mode
VAR = {
VS: (2, 'し', 'し', 'して', 'した', 'する', 'すれ', 'しろ'),
#VZ = 'VZ' # サ変
#VK = 'VK' # カ変
V1: (1, '', '', 'て', 'た', 'る', 'れ', 'ろ'),
VK5: (1, 'か', 'き', 'いて', 'いた', 'く', 'け', 'こ'),
VS5: (1, 'さ', 'し', 'して', 'した', 'す', 'せ', 'そ'),
VT5: (1, 'た', 'ち', 'って', 'った', 'つ', 'て', 'と'),
VN5: (1, 'な', 'に', 'んで', 'んだ', 'ぬ', 'ね', 'の'),
VM5: (1, 'ま', 'み', 'んで', 'んだ', 'む', 'め', 'も'),
VR5: (1, 'ら', 'り', 'って', 'った', 'る', 'れ', 'ろ'),
VW5: (1, 'わ', 'い', 'って', 'った', 'う', 'え', 'お'),
VG5: (1, 'が', 'ぎ', 'いで', 'いだ', 'ぐ', 'げ', 'ご'),
VB5: (1, 'ば', 'び', 'んで', 'んだ', 'ぶ', 'べ', 'ぼ'),
ADJ: (1, 'く', 'く', 'くて', 'かった', 'い', 'けれ', ''),
'ます': (2, 'ません', '', 'まして', 'ました', 'ます', 'ませ', '')  # strip the two-character 'ます' before appending the endings
# NA = 'NA' # 形容動詞 立派だ
}
def varindex(mode):
if mode & 接続形 == 接続形: return 3
if mode & 過去形 == 過去形: return 4
if mode & 未然形 == 未然形: return 1
if mode & 連用形 == 連用形: return 2
if mode & 仮定形 == 仮定形: return 6
if mode & 命令形 == 命令形: return 7
if mode == 基本形: return 5
print('debug mode =', mode)
return 5
def emit_impl(base, vpos, mode):
if mode & みる == みる:
base = emit_impl(base, vpos, mode >> VSHIFT)
return emit_impl('みる', V1, mode & ~みる)
if mode & させる == させる:
if vpos == VS or vpos == VZ:
return emit_impl(base[:-2]+'させる', V1, mode & ~させる)
base = emit_impl(base, vpos, 未然形) + 'させる'
return emit_impl(base, V1, mode & ~させる)
if mode & せる == せる:
if vpos == VS or vpos == VZ:
return emit_impl(base[:-2]+'させる', V1, mode & ~させる)
base = emit_impl(base, vpos, 未然形) + 'せる'
return emit_impl(base, V1, mode & ~せる)
if mode & される == される:
if vpos == VS or vpos == VZ:
return emit_impl(base[:-2]+'される', V1, mode & ~される)
base = emit_impl(base, vpos, 未然形) + 'される'
return emit_impl(base, V1, mode & ~される)
if mode & れる == れる:
if vpos == VS or vpos == VZ:
return emit_impl(base[:-2]+'される', V1, mode & ~れる)
base = emit_impl(base, vpos, 未然形) + 'れる'
return emit_impl(base, V1, mode & ~れる)
if mode & できる == できる:
if vpos == VS or vpos == VZ:
return emit_impl(base[:-2]+'できる', V1, mode & ~できる)
if vpos == V1:
return emit_impl(base[:-1]+'られる', V1, mode & ~できる)
## 書く -> 書ける
base = emit_impl(base, vpos, 仮定形)+'る'
return emit_impl(base, V1, mode & ~できる)
if mode & 丁寧形 == 丁寧形:
base = emit_impl(base, vpos, 連用形) + 'ます'
return emit_impl(base, 'ます', mode & ~丁寧形)  # look up the 'ます' row of VAR
if mode & 否定形 == 否定形:
base = emit_impl(base, vpos, 未然形) + 'ない'
return emit_impl(base, ADJ, mode & ~否定形)
d = VAR[vpos]
base = base[:-d[0]]
return base + d[varindex(mode)]
def modes(mode):
ss = []
for key in MODES:
m = MODES[key]
if mode & m == m:
ss.append(f'#{key}')
return ' '.join(ss)
def test(s):
vpos, base, prefix = detect_vpos(s)
mode = detect_mode(s)
print(s, '=>', emit_impl(prefix+base, vpos, mode))
if __name__ == '__main__':
test('彼はごん攻めする')
test('入力された')
print('入力した => ', emit_impl('入力する', VS, れる|過去形|仮定形))
print('書かれた => ', emit_impl('書く', VK5, れる|過去形|否定形))
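# Extra hedged check (standalone): flags compose the same way as in the test calls above;
# 一段 (V1) with 否定形|過去形 turns 「食べる」 into 「食べなかった」.
assert emit_impl('食べる', V1, 否定形 | 過去形) == '食べなかった'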
| 25.796748
| 62
| 0.466436
|
95d836ac3c3c58f2dd3467844a574bfa0e2463ed
| 2,275
|
py
|
Python
|
src/unit_1_3.py
|
tommylees112/scientific-computing
|
08a4173287699c7012fdd01de949d299e38aa30c
|
[
"MIT"
] | 5
|
2021-02-03T02:10:15.000Z
|
2022-01-12T13:21:47.000Z
|
src/unit_1_3.py
|
tommylees112/scientific-computing
|
08a4173287699c7012fdd01de949d299e38aa30c
|
[
"MIT"
] | 3
|
2021-02-01T16:00:30.000Z
|
2021-02-02T17:09:17.000Z
|
src/unit_1_3.py
|
tommylees112/scientific-computing
|
08a4173287699c7012fdd01de949d299e38aa30c
|
[
"MIT"
] | 5
|
2021-02-01T15:45:47.000Z
|
2022-02-04T12:20:25.000Z
|
import numpy as np
import matplotlib.pylab as plt
import scipy
import scipy.linalg
import sys
def lu_decomposition(A):
m, n = A.shape
LU = np.copy(A)
pivots = np.empty(n, dtype=int)
# initialise the pivot row and column
h = 0
k = 0
while h < m and k < n:
# Find the k-th pivot:
pivots[k] = np.argmax(LU[h:, k]) + h
if LU[pivots[k], k] == 0:
# No pivot in this column, pass to next column
k = k+1
else:
# swap rows
LU[[h, pivots[k]], :] = LU[[pivots[k], h], :]
# Do for all rows below pivot:
for i in range(h+1, m):
f = LU[i, k] / LU[h, k]
# Store f as the new L column values
LU[i, k] = f
# Do for all remaining elements in current row:
for j in range(k + 1, n):
LU[i, j] = LU[i, j] - LU[h, j] * f
# Increase pivot row and column
h = h + 1
k = k + 1
return LU, pivots
def random_matrix(n):
R = np.random.rand(n, n)
A = np.zeros((n, n))
triu = np.triu_indices(n)
A[triu] = R[triu]
return A
def random_non_singular_matrix(n):
A = np.random.rand(n, n)
while np.linalg.cond(A) > 1/sys.float_info.epsilon:
A = np.random.rand(n, n)
return A
As = [
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
random_non_singular_matrix(3),
random_non_singular_matrix(4),
random_non_singular_matrix(5),
random_non_singular_matrix(6),
]
def pivots_to_row_indices(pivots):
n = len(pivots)
indices = np.array(range(0, n))
for i, p in enumerate(pivots):
indices[i], indices[p] = indices[p], indices[i]
return indices
def calculate_L_mult_U(LU):
L = np.tril(LU)
np.fill_diagonal(L, 1)
U = np.triu(LU)
return L @ U
for A in As:
LU_scipy, pivots_scipy = scipy.linalg.lu_factor(A)
row_indices_scipy = pivots_to_row_indices(pivots_scipy)
LU_mine, pivots_mine = lu_decomposition(A)
row_indices_mine = pivots_to_row_indices(pivots_mine)
np.testing.assert_almost_equal(calculate_L_mult_U(LU_scipy), A[row_indices_scipy])
np.testing.assert_almost_equal(calculate_L_mult_U(LU_mine), A[row_indices_mine])
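# Extra hedged check, independent of the hand-rolled lu_decomposition above:
# scipy's factorization also solves A x = b directly via lu_solve.
_A = random_non_singular_matrix(4)
_b = np.random.rand(4)
_x = scipy.linalg.lu_solve(scipy.linalg.lu_factor(_A), _b)
np.testing.assert_almost_equal(_A @ _x, _b)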
| 28.4375
| 87
| 0.578022
|
85fc7d1f6b9f49e847eb6faa80290a6ed316a7c4
| 108
|
py
|
Python
|
baiduindex/__init__.py
|
zhyzhyzhy123/baiduindex
|
04fd772c25ebce0c5c0a94a1caf046264c76592b
|
[
"MIT"
] | null | null | null |
baiduindex/__init__.py
|
zhyzhyzhy123/baiduindex
|
04fd772c25ebce0c5c0a94a1caf046264c76592b
|
[
"MIT"
] | null | null | null |
baiduindex/__init__.py
|
zhyzhyzhy123/baiduindex
|
04fd772c25ebce0c5c0a94a1caf046264c76592b
|
[
"MIT"
] | null | null | null |
__version__ = '0.0.0'
__author__ = 'zhy'
__describtion__ = 'Spider of Baidu Index'
from .index import index
| 21.6
| 41
| 0.740741
|
1f64a5a71ba8caf6c6e24069a04fdaaa925a3cc3
| 39,218
|
py
|
Python
|
yolact_edge/data/config.py
|
michaelcukier/yolact_edge
|
0453ad74b1e1ec7c197562025a730cc03c49c2c4
|
[
"MIT"
] | null | null | null |
yolact_edge/data/config.py
|
michaelcukier/yolact_edge
|
0453ad74b1e1ec7c197562025a730cc03c49c2c4
|
[
"MIT"
] | 1
|
2021-10-06T09:52:03.000Z
|
2021-10-06T09:52:03.000Z
|
yolact_edge/data/config.py
|
michaelcukier/yolact_edge
|
0453ad74b1e1ec7c197562025a730cc03c49c2c4
|
[
"MIT"
] | 2
|
2021-10-06T09:50:17.000Z
|
2021-11-05T10:57:09.000Z
|
from yolact_edge.backbone import ResNetBackbone, VGGBackbone, ResNetBackboneGN, DarkNetBackbone, MobileNetV2Backbone
from math import sqrt
import torch
# for making bounding boxes pretty
COLORS = ((244, 67, 54),
(233, 30, 99),
(156, 39, 176),
(103, 58, 183),
( 63, 81, 181),
( 33, 150, 243),
( 3, 169, 244),
( 0, 188, 212),
( 0, 150, 136),
( 76, 175, 80),
(139, 195, 74),
(205, 220, 57),
(255, 235, 59),
(255, 193, 7),
(255, 152, 0),
(255, 87, 34),
(121, 85, 72),
(158, 158, 158),
( 96, 125, 139))
# These are in BGR and are for ImageNet
MEANS = (103.94, 116.78, 123.68)
STD = (57.38, 57.12, 58.40)
OVERALL_ANNOTATION_CLASSES=('ripe','unripe','pink')
OVERALL_ANNOTATION_LABEL_MAP={0: 1, 1: 2, 2: 3}
COCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush')
COCO_LABEL_MAP = { 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8,
9: 9, 10: 10, 11: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16,
18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24,
27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, 36: 32,
37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40,
46: 41, 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48,
54: 49, 55: 50, 56: 51, 57: 52, 58: 53, 59: 54, 60: 55, 61: 56,
62: 57, 63: 58, 64: 59, 65: 60, 67: 61, 70: 62, 72: 63, 73: 64,
74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, 81: 72,
82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80}
YOUTUBE_VIS_CLASSES = ('person', 'giant_panda', 'lizard', 'parrot', 'skateboard',
'sedan', 'ape', 'dog', 'snake', 'monkey', 'hand', 'rabbit',
'duck', 'cat', 'cow', 'fish', 'train', 'horse', 'turtle',
'bear', 'motorbike', 'giraffe', 'leopard', 'fox', 'deer',
'owl', 'surfboard', 'airplane', 'truck', 'zebra', 'tiger',
'elephant', 'snowboard', 'boat', 'shark', 'mouse', 'frog',
'eagle', 'earless_seal', 'tennis_racket')
YOUTUBE_VIS_LABEL_MAP = { 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7,
8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14,
15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21,
22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28,
29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 34: 34, 35: 35,
36: 36, 37: 37, 38: 38, 39: 39, 40: 40}
COCO_INV_LABEL_MAP = {t: s for s, t in COCO_LABEL_MAP.items()}
YTVIS_COCO_CLASS_MAP = {'person': 'person', 'skateboard': 'skateboard', 'sedan': 'car',
'dog': 'dog', 'cat': 'cat', 'cow': 'cow', 'train': 'train',
'horse': 'horse', 'bear': 'bear', 'motorbike': 'motorcycle',
'giraffe': 'giraffe', 'surfboard': 'surfboard', 'airplane': 'airplane',
'truck': 'truck', 'zebra': 'zebra', 'elephant': 'elephant',
'snowboard': 'snowboard', 'boat': 'boat', 'tennis_racket': 'tennis racket'}
COCO_YTVIS_CLASS_MAP = {coco: ytvis for ytvis, coco in YTVIS_COCO_CLASS_MAP.items()}
COCO_YTVIS_LABEL_MAP = {COCO_INV_LABEL_MAP[COCO_CLASSES.index(coco) + 1]: YOUTUBE_VIS_CLASSES.index(ytvis) + 1 for coco, ytvis in COCO_YTVIS_CLASS_MAP.items()}
COCO_INTER_LABEL_MAP = {COCO_INV_LABEL_MAP[COCO_CLASSES.index(coco) + 1]: COCO_CLASSES.index(coco) + 1 for coco in COCO_YTVIS_CLASS_MAP}
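# Worked example of the derived maps above (follows directly from the class lists):
# COCO "car" (category id 3) lines up with YouTube-VIS "sedan" (1-indexed id 6).
assert COCO_YTVIS_LABEL_MAP[3] == YOUTUBE_VIS_CLASSES.index('sedan') + 1 == 6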
MOTS_CLASSES = ('car', 'pedestrian')
MOTS_LABEL_MAP = {1: 1, 2: 2}
# ----------------------- CONFIG CLASS ----------------------- #
class Config(object):
"""
Holds the configuration for anything you want it to.
To get the currently active config, call get_cfg().
To use, just do cfg.x instead of cfg['x'].
I made this because doing cfg['x'] all the time is dumb.
"""
def __init__(self, config_dict):
for key, val in config_dict.items():
self.__setattr__(key, val)
def copy(self, new_config_dict={}):
"""
Copies this config into a new config object, making
the changes given by new_config_dict.
"""
ret = Config(vars(self))
for key, val in new_config_dict.items():
ret.__setattr__(key, val)
return ret
def replace(self, new_config_dict):
"""
Copies new_config_dict into this config object.
Note: new_config_dict can also be a config object.
"""
if isinstance(new_config_dict, Config):
new_config_dict = vars(new_config_dict)
for key, val in new_config_dict.items():
self.__setattr__(key, val)
def print(self):
for k, v in vars(self).items():
print(k, ' = ', v)
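# Minimal sketch of how the helpers above are used throughout this file:
# copy() derives a new config with overrides, replace() mutates in place.
_cfg_example = Config({'lr': 1e-3, 'max_iter': 400000})
_cfg_derived = _cfg_example.copy({'lr': 2e-4})
assert _cfg_example.lr == 1e-3 and _cfg_derived.lr == 2e-4 and _cfg_derived.max_iter == 400000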
# ----------------------- DATASETS ----------------------- #
dataset_base = Config({
'name': 'Base Dataset',
# Training images and annotations
'train_images': './data/coco/images/',
'train_info': 'path_to_annotation_file',
# Calibration image folder for TensorRT INT8 conversion.
'calib_images': './data/coco/calib_images/',
# Validation images and annotations.
'valid_images': './data/coco/images/',
'valid_info': 'path_to_annotation_file',
# Whether or not to load GT. If this is False, eval.py quantitative evaluation won't work.
'has_gt': True,
# Whether the dataset is a video dataset
'is_video': False,
# A list of names for each of you classes.
'class_names': COCO_CLASSES,
# COCO class ids aren't sequential, so this is a bandage fix. If your ids aren't sequential,
# provide a map from category_id -> index in class_names + 1 (the +1 is there because it's 1-indexed).
# If not specified, this just assumes category ids start at 1 and increase sequentially.
'label_map': None,
# Dataset Map
'dataset_map': None,
# Joint training
'joint': None
})
debug_dataset = dataset_base.copy({
'name':'debug_dataset',
'train_images': '/home/appuser/datasets/OVERALL_ANNOTATION_SMALL',
'train_info': '/home/appuser/datasets/OVERALL_ANNOTATION_SMALL/coco.json',
'valid_images': '/home/appuser/datasets/OVERALL_ANNOTATION_SMALL',
'valid_info': '/home/appuser/datasets/OVERALL_ANNOTATION_SMALL/coco.json',
'class_names':('flesh_ripe','flesh_unripe'),
'label_map': {0:1,1:2}
})
strawberry_dataset=dataset_base.copy({
'name': 'strawberry_base',
'label_map': {0: 1, 1: 2}
})
bag_610_dataset= strawberry_dataset.copy({
'name':'bag_610',
'train_images':'/datasets/610babd54e5e825f560b66b2',
'train_info':'/datasets/610babd54e5e825f560b66b2/coco.json',
'valid_images':'/datasets/OVERALL_ANNOTATION/',
'valid_info':'/datasets/OVERALL_ANNOTATION/coco.json',
'class_names': ('ripe','unripe'),
# If using 3 class data then need to map pink to unripe
'label_map': {0: 1, 1: 2,2:2}
})
overall_annotations_dataset = dataset_base.copy({
'name': 'overall_annotations_norway',
'train_images': '/datasets/OVERALL_ANNOTATION/',
'train_info': '/datasets/OVERALL_ANNOTATION/train_3cat.json',
'valid_images': '/datasets/OVERALL_ANNOTATION/',
'valid_info': '/datasets/OVERALL_ANNOTATION/test_3cat.json',
'has_gt': True,
'label_map': OVERALL_ANNOTATION_LABEL_MAP,
'class_names': OVERALL_ANNOTATION_CLASSES
})
overall_annotations_dataset_server = dataset_base.copy({
'name': 'overall_annotations_norway',
'train_images': '/datasets/OVERALL_ANNOTATION/',
'train_info': '/datasets/OVERALL_ANNOTATION/train_3cat.json',
'valid_images': '/datasets/OVERALL_ANNOTATION/',
'valid_info': '/datasets/OVERALL_ANNOTATION/test_3cat.json',
'class_names':('ripe','unripe','pink'),
'label_map': {0:1,1:2,2:3}
})
coco2014_dataset = dataset_base.copy({
'name': 'COCO 2014',
'train_info': './data/coco/annotations/instances_train2014.json',
'valid_info': './data/coco/annotations/instances_val2014.json',
'label_map': COCO_LABEL_MAP
})
coco2017_dataset = dataset_base.copy({
'name': 'COCO 2017',
'train_info': './data/coco/annotations/instances_train2017.json',
'valid_info': './data/coco/annotations/instances_val2017.json',
'label_map': COCO_LABEL_MAP
})
coco2017_testdev_dataset = dataset_base.copy({
'name': 'COCO 2017 Test-Dev',
'valid_info': './data/coco/annotations/image_info_test-dev2017.json',
'has_gt': False,
'label_map': COCO_LABEL_MAP
})
flying_chairs_dataset = dataset_base.copy({
'name': 'FlyingChairs',
'trainval_info': './data/FlyingChairs/train_val.txt',
'trainval_images': './data/FlyingChairs/data/',
})
youtube_vis_dataset = dataset_base.copy({
'name': 'YouTube VIS',
'class_names': YOUTUBE_VIS_CLASSES,
'label_map': YOUTUBE_VIS_LABEL_MAP,
'train_info': './data/YoutubeVIS/annotations/train.v4.json',
'train_images': './data/YoutubeVIS/train_all_frames/JPEGImages/',
'use_all_frames': False,
# Calibration image folder for TensorRT INT8 conversion.
# Because we need two frames (prev, next) to estimate flows and calibrate the warping module, we need to specify a parent folder for calibration images, and two sub-folders for previous and next frames correspondingly.
# Use colon(:) to split folder (sub-folders).
'calib_images': './data/YoutubeVIS/calib_images/:prev:next',
'frame_offset_lb': 1,
'frame_offset_ub': 4,
'frame_offset_multiplier': 1,
'all_frame_direction': 'allway',
'valid_info': './data/YoutubeVIS/annotations/valid.v4.json',
'valid_images': './data/YoutubeVIS/valid_all_frames/v4/',
'images_per_video': 5,
'is_video': True
})
# ----------------------- TRANSFORMS ----------------------- #
resnet_transform = Config({
'channel_order': 'RGB',
'normalize': True,
'subtract_means': False,
'to_float': False,
})
vgg_transform = Config({
# Note that though vgg is traditionally BGR,
# the channel order of vgg_reducedfc.pth is RGB.
'channel_order': 'RGB',
'normalize': False,
'subtract_means': True,
'to_float': False,
})
darknet_transform = Config({
'channel_order': 'RGB',
'normalize': False,
'subtract_means': False,
'to_float': True,
})
mobilenetv2_transform = Config({
'channel_order': 'RGB',
'normalize': True,
'subtract_means': False,
'to_float': False,
})
# ----------------------- BACKBONES ----------------------- #
backbone_base = Config({
'name': 'Base Backbone',
'path': 'path/to/pretrained/weights',
'type': object,
'args': tuple(),
'transform': resnet_transform,
'selected_layers': list(),
'pred_scales': list(),
'pred_aspect_ratios': list(),
'use_pixel_scales': False,
'preapply_sqrt': True,
'use_square_anchors': False,
})
resnet101_backbone = backbone_base.copy({
'name': 'ResNet101',
'path': 'resnet101_reducedfc.pth',
'type': ResNetBackbone,
'args': ([3, 4, 23, 3],),
'transform': resnet_transform,
'selected_layers': list(range(2, 8)),
'pred_scales': [[1]]*6,
'pred_aspect_ratios': [ [[0.66685089, 1.7073535, 0.87508774, 1.16524493, 0.49059086]] ] * 6,
})
resnet101_gn_backbone = backbone_base.copy({
'name': 'ResNet101_GN',
'path': 'R-101-GN.pkl',
'type': ResNetBackboneGN,
'args': ([3, 4, 23, 3],),
'transform': resnet_transform,
'selected_layers': list(range(2, 8)),
'pred_scales': [[1]]*6,
'pred_aspect_ratios': [ [[0.66685089, 1.7073535, 0.87508774, 1.16524493, 0.49059086]] ] * 6,
})
resnet152_backbone = resnet101_backbone.copy({
'name': 'ResNet152',
'path': 'resnet152-b121ed2d.pth',
'type': ResNetBackbone,
'args': ([3, 8, 36, 3],),
'transform': resnet_transform,
})
resnet50_backbone = resnet101_backbone.copy({
'name': 'ResNet50',
'path': 'resnet50-19c8e357.pth',
'type': ResNetBackbone,
'args': ([3, 4, 6, 3],),
'transform': resnet_transform,
})
darknet53_backbone = backbone_base.copy({
'name': 'DarkNet53',
'path': 'darknet53.pth',
'type': DarkNetBackbone,
'args': ([1, 2, 8, 8, 4],),
'transform': darknet_transform,
'selected_layers': list(range(3, 9)),
'pred_scales': [[3.5, 4.95], [3.6, 4.90], [3.3, 4.02], [2.7, 3.10], [2.1, 2.37], [1.8, 1.92]],
'pred_aspect_ratios': [ [[1, sqrt(2), 1/sqrt(2), sqrt(3), 1/sqrt(3)][:n], [1]] for n in [3, 5, 5, 5, 3, 3] ],
})
vgg16_arch = [[64, 64],
[ 'M', 128, 128],
[ 'M', 256, 256, 256],
[('M', {'kernel_size': 2, 'stride': 2, 'ceil_mode': True}), 512, 512, 512],
[ 'M', 512, 512, 512],
[('M', {'kernel_size': 3, 'stride': 1, 'padding': 1}),
(1024, {'kernel_size': 3, 'padding': 6, 'dilation': 6}),
(1024, {'kernel_size': 1})]]
vgg16_backbone = backbone_base.copy({
'name': 'VGG16',
'path': 'vgg16_reducedfc.pth',
'type': VGGBackbone,
'args': (vgg16_arch, [(256, 2), (128, 2), (128, 1), (128, 1)], [3]),
'transform': vgg_transform,
'selected_layers': [3] + list(range(5, 10)),
'pred_scales': [[5, 4]]*6,
'pred_aspect_ratios': [ [[1], [1, sqrt(2), 1/sqrt(2), sqrt(3), 1/sqrt(3)][:n]] for n in [3, 5, 5, 5, 3, 3] ],
})
mobilenetv2_arch = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
mobilenetv2_backbone = backbone_base.copy({
'name': 'MobileNetV2',
'path': 'mobilenet_v2-b0353104.pth',
'type': MobileNetV2Backbone,
'args': (1.0, mobilenetv2_arch, 8),
'transform': mobilenetv2_transform,
'selected_layers': [3, 4, 6],
'pred_aspect_ratios': [ [[1, 1/2, 2]] ]*5,
'pred_scales': [[24], [48], [96], [192], [384]],
'use_pixel_scales': True,
'preapply_sqrt': False,
'use_square_anchors': True,
})
# ----------------------- MASK BRANCH TYPES ----------------------- #
mask_type = Config({
# Direct produces masks directly as the output of each pred module.
# This is denoted as fc-mask in the paper.
# Parameters: mask_size, use_gt_bboxes
'direct': 0,
# Lincomb produces coefficients as the output of each pred module then uses those coefficients
# to linearly combine features from a prototype network to create image-sized masks.
# Parameters:
# - masks_to_train (int): Since we're producing (near) full image masks, it'd take too much
# vram to backprop on every single mask. Thus we select only a subset.
# - mask_proto_src (int): The input layer to the mask prototype generation network. This is an
# index in backbone.layers. Use None to use the image itself instead.
# - mask_proto_net (list<tuple>): A list of layers in the mask proto network with the last one
# being where the masks are taken from. Each conv layer is in
# the form (num_features, kernel_size, **kwdargs). An empty
# list means to use the source for prototype masks. If the
# kernel_size is negative, this creates a deconv layer instead.
# If the kernel_size is negative and the num_features is None,
# this creates a simple bilinear interpolation layer instead.
# - mask_proto_bias (bool): Whether to include an extra coefficient that corresponds to a proto
# mask of all ones.
# - mask_proto_prototype_activation (func): The activation to apply to each prototype mask.
# - mask_proto_mask_activation (func): After summing the prototype masks with the predicted
# coeffs, what activation to apply to the final mask.
# - mask_proto_coeff_activation (func): The activation to apply to the mask coefficients.
# - mask_proto_crop (bool): If True, crop the mask with the predicted bbox during training.
# - mask_proto_crop_expand (float): If cropping, the percent to expand the cropping bbox by
# in each direction. This is to make the model less reliant
# on perfect bbox predictions.
# - mask_proto_loss (str [l1|disj]): If not None, apply an l1 or disjunctive regularization
# loss directly to the prototype masks.
# - mask_proto_binarize_downsampled_gt (bool): Binarize GT after downsampling during training?
# - mask_proto_normalize_mask_loss_by_sqrt_area (bool): Whether to normalize mask loss by sqrt(sum(gt))
# - mask_proto_reweight_mask_loss (bool): Reweight mask loss such that background is divided by
# #background and foreground is divided by #foreground.
# - mask_proto_grid_file (str): The path to the grid file to use with the next option.
# This should be a numpy.dump file with shape [numgrids, h, w]
# where h and w are w.r.t. the mask_proto_src convout.
# - mask_proto_use_grid (bool): Whether to add extra grid features to the proto_net input.
# - mask_proto_coeff_gate (bool): Add an extra set of sigmoided coefficients that is multiplied
# into the predicted coefficients in order to "gate" them.
# - mask_proto_prototypes_as_features (bool): For each prediction module, downsample the prototypes
# to the convout size of that module and supply the prototypes as input
# in addition to the already supplied backbone features.
# - mask_proto_prototypes_as_features_no_grad (bool): If the above is set, don't backprop gradients
# to the prototypes from the network head.
# - mask_proto_remove_empty_masks (bool): Remove masks that are downsampled to 0 during loss calculations.
# - mask_proto_reweight_coeff (float): The coefficient to multiply the foreground pixels with if reweighting.
# - mask_proto_coeff_diversity_loss (bool): Apply coefficient diversity loss on the coefficients so that the same
# instance has similar coefficients.
# - mask_proto_coeff_diversity_alpha (float): The weight to use for the coefficient diversity loss.
# - mask_proto_normalize_emulate_roi_pooling (bool): Normalize the mask loss to emulate roi pooling's affect on loss.
# - mask_proto_double_loss (bool): Whether to use the old loss in addition to any special new losses.
# - mask_proto_double_loss_alpha (float): The alpha to weight the above loss.
'lincomb': 1,
})
# ----------------------- ACTIVATION FUNCTIONS ----------------------- #
activation_func = Config({
'tanh': torch.tanh,
'sigmoid': torch.sigmoid,
'softmax': lambda x: torch.nn.functional.softmax(x, dim=-1),
'relu': lambda x: torch.nn.functional.relu(x, inplace=True),
'none': lambda x: x,
})
# ----------------------- FPN DEFAULTS ----------------------- #
fpn_base = Config({
# The number of features to have in each FPN layer
'num_features': 256,
# The upsampling mode used
'interpolation_mode': 'bilinear',
# The number of extra layers to be produced by downsampling starting at P5
'num_downsample': 1,
# Whether to down sample with a 3x3 stride 2 conv layer instead of just a stride 2 selection
'use_conv_downsample': False,
# Whether to pad the pred layers with 1 on each side (I forgot to add this at the start)
# This is just here for backwards compatibility
'pad': True,
})
# ------------------------ FLOW DEFAULTS ------------------------ #
flow_base = Config({
'encode_layers': [[4, 1], [2], [4]],
'encode_channels': 256,
'fine_tune_layers': None,
'warp_layers': "P4P5",
'use_spa': False,
'use_normalized_spa': False,
'use_shuffle_cat': False,
'num_groups': 1,
'use_scale_factor': True,
'use_scale_bias': True,
'reduce_channels': [],
'warp_mode': 'none',
'flow_layer': 'each',
'base_backward': True,
'feature_matching_loss': None,
'fm_loss_loc': 'L',
'fm_loss_alpha': 1.0,
'train_flow': False,
'model': 'none',
})
# ----------------------- CONFIG DEFAULTS ----------------------- #
coco_base_config = Config({
'dataset': coco2014_dataset,
'joint_dataset': None,
'num_classes': 81, # This should include the background class
'max_iter': 400000,
# The maximum number of detections for evaluation
'max_num_detections': 100,
# dw' = momentum * dw - lr * (grad + decay * w)
'lr': 1e-3,
'momentum': 0.9,
'decay': 5e-4,
# For each lr step, what to multiply the lr with
'gamma': 0.1,
'lr_steps': (280000, 360000, 400000),
# Initial learning rate to linearly warmup from (if until > 0)
'lr_warmup_init': 1e-4,
# If > 0 then increase the lr linearly from warmup_init to lr each iter for until iters
'lr_warmup_until': 500,
# The terms to scale the respective loss by
'conf_alpha': 1,
'bbox_alpha': 1.5,
'mask_alpha': 0.4 / 256 * 140 * 140, # Some funky equation. Don't worry about it.
# Eval.py sets this if you just want to run YOLACT as a detector
'eval_mask_branch': True,
# See mask_type for details.
'mask_type': mask_type.direct,
'mask_size': 16,
'masks_to_train': 100,
'mask_proto_src': None,
'mask_proto_net': [(256, 3, {}), (256, 3, {})],
'mask_proto_bias': False,
'mask_proto_prototype_activation': activation_func.relu,
'mask_proto_mask_activation': activation_func.sigmoid,
'mask_proto_coeff_activation': activation_func.tanh,
'mask_proto_crop': True,
'mask_proto_crop_expand': 0,
'mask_proto_loss': None,
'mask_proto_binarize_downsampled_gt': True,
'mask_proto_normalize_mask_loss_by_sqrt_area': False,
'mask_proto_reweight_mask_loss': False,
'mask_proto_grid_file': 'data/grid.npy',
'mask_proto_use_grid': False,
'mask_proto_coeff_gate': False,
'mask_proto_prototypes_as_features': False,
'mask_proto_prototypes_as_features_no_grad': False,
'mask_proto_remove_empty_masks': False,
'mask_proto_reweight_coeff': 1,
'mask_proto_coeff_diversity_loss': False,
'mask_proto_coeff_diversity_alpha': 1,
'mask_proto_normalize_emulate_roi_pooling': False,
'mask_proto_double_loss': False,
'mask_proto_double_loss_alpha': 1,
# SSD data augmentation parameters
# Randomize hue, vibrance, etc.
'augment_photometric_distort': True,
# Have a chance to scale down the image and pad (to emulate smaller detections)
'augment_expand': True,
# Potentially sample a random crop from the image and put it in a random place
'augment_random_sample_crop': True,
# Mirror the image with a probability of 1/2
'augment_random_mirror': True,
# Flip the image vertically with a probability of 1/2
'augment_random_flip': False,
# With uniform probability, rotate the image [0,90,180,270] degrees
'augment_random_rot90': False,
# Discard detections with width and height smaller than this (in absolute width and height)
'discard_box_width': 4 / 550,
'discard_box_height': 4 / 550,
# If using batchnorm anywhere in the backbone, freeze the batchnorm layer during training.
# Note: any additional batch norm layers after the backbone will not be frozen.
'freeze_bn': False,
# Set this to a config object if you want an FPN (inherit from fpn_base). See fpn_base for details.
'fpn': None,
# Use the same weights for each network head
'share_prediction_module': False,
# For hard negative mining, instead of using the negatives that are least confidently background,
# use negatives that are most confidently not background.
'ohem_use_most_confident': False,
# Use focal loss as described in https://arxiv.org/pdf/1708.02002.pdf instead of OHEM
'use_focal_loss': False,
'focal_loss_alpha': 0.25,
'focal_loss_gamma': 2,
# The initial bias toward foreground objects, as specified in the focal loss paper
'focal_loss_init_pi': 0.01,
# Whether to use sigmoid focal loss instead of softmax, all else being the same.
'use_sigmoid_focal_loss': False,
# Use class[0] to be the objectness score and class[1:] to be the softmax predicted class.
# Note: at the moment this is only implemented if use_focal_loss is on.
'use_objectness_score': False,
# Adds a global pool + fc layer to the smallest selected layer that predicts the existence of each of the 80 classes.
# This branch is only evaluated during training time and is just there for multitask learning.
'use_class_existence_loss': False,
'class_existence_alpha': 1,
# Adds a 1x1 convolution directly to the biggest selected layer that predicts a semantic segmentations for each of the 80 classes.
# This branch is only evaluated during training time and is just there for multitask learning.
'use_semantic_segmentation_loss': False,
'semantic_segmentation_alpha': 1,
# Match gt boxes using the Box2Pix change metric instead of the standard IoU metric.
# Note that the threshold you set for iou_threshold should be negative with this setting on.
'use_change_matching': False,
# Uses the same network format as mask_proto_net, except this time it's for adding extra head layers before the final
# prediction in prediction modules. If this is none, no extra layers will be added.
'extra_head_net': None,
# What params should the final head layers have (the ones that predict box, confidence, and mask coeffs)
'head_layer_params': {'kernel_size': 3, 'padding': 1},
# Add extra layers between the backbone and the network heads
# The order is (bbox, conf, mask)
'extra_layers': (0, 0, 0),
# During training, to match detections with gt, first compute the maximum gt IoU for each prior.
# Then, any of those priors whose maximum overlap is over the positive threshold, mark as positive.
# For any priors whose maximum is less than the negative iou threshold, mark them as negative.
# The rest are neutral and not used in calculating the loss.
'positive_iou_threshold': 0.5,
'negative_iou_threshold': 0.5,
# If less than 1, anchors treated as a negative that have a crowd iou over this threshold with
# the crowd boxes will be treated as a neutral.
'crowd_iou_threshold': 1,
# This is filled in at runtime by Yolact's __init__, so don't touch it
'mask_dim': None,
# Input image size. If preserve_aspect_ratio is False, min_size is ignored.
'min_size': 200,
'max_size': 300,
# Whether or not to do post processing on the cpu at test time
'force_cpu_nms': True,
# Whether to use mask coefficient cosine similarity nms instead of bbox iou nms
'use_coeff_nms': False,
# Whether or not to have a separate branch whose sole purpose is to act as the coefficients for coeff_diversity_loss
# Remember to turn on coeff_diversity_loss, or these extra coefficients won't do anything!
# To see their effect, also remember to turn on use_coeff_nms.
'use_instance_coeff': False,
'num_instance_coeffs': 64,
# Whether or not to tie the mask loss / box loss to 0
'train_masks': True,
'train_boxes': True,
# If enabled, the gt masks will be cropped using the gt bboxes instead of the predicted ones.
# This speeds up training time considerably but results in much worse mAP at test time.
'use_gt_bboxes': False,
# Whether or not to preserve aspect ratio when resizing the image.
# If True, uses the faster r-cnn resizing scheme.
# If False, all images are resized to max_size x max_size
'preserve_aspect_ratio': False,
# Whether or not to use the prediction module (c) from DSSD
'use_prediction_module': False,
# Whether or not to use the predicted coordinate scheme from Yolo v2
'use_yolo_regressors': False,
# For training, bboxes are considered "positive" if their anchors have a 0.5 IoU overlap
# or greater with a ground truth box. If this is true, instead of using the anchor boxes
# for this IoU computation, the matching function will use the predicted bbox coordinates.
# Don't turn this on if you're not using yolo regressors!
'use_prediction_matching': False,
# A list of settings to apply after the specified iteration. Each element of the list should look like
# (iteration, config_dict) where config_dict is a dictionary you'd pass into a config object's init.
'delayed_settings': [],
# Use command-line arguments to set this.
'no_jit': False,
'backbone': None,
'name': 'base_config',
})
# ----------------------- YOLACT v1.0 CONFIGS ----------------------- #
yolact_base_config = coco_base_config.copy({
'name': 'yolact_base',
# Dataset stuff
'dataset': coco2017_dataset,
'num_classes': len(coco2017_dataset.class_names) + 1,
# Image Size
'max_size': 550,
# Training params
'lr_schedule': 'step',
'lr_steps': (280000, 600000, 700000, 750000),
'max_iter': 800000,
'flow': flow_base,
# Backbone Settings
'backbone': resnet101_backbone.copy({
'selected_layers': list(range(1, 4)),
'use_pixel_scales': True,
'preapply_sqrt': False,
'use_square_anchors': True, # This is for backward compatibility with a bug
'pred_aspect_ratios': [ [[1, 1/2, 2]] ]*5,
'pred_scales': [[24], [48], [96], [192], [384]],
}),
# FPN Settings
'fpn': fpn_base.copy({
'use_conv_downsample': True,
'num_downsample': 2,
}),
# Mask Settings
'mask_type': mask_type.lincomb,
'mask_alpha': 6.125,
'mask_proto_src': 0,
'mask_proto_net': [(256, 3, {'padding': 1})] * 3 + [(None, -2, {}), (256, 3, {'padding': 1})] + [(32, 1, {})],
'mask_proto_normalize_emulate_roi_pooling': True,
# Other stuff
'share_prediction_module': True,
'extra_head_net': [(256, 3, {'padding': 1})],
'positive_iou_threshold': 0.5,
'negative_iou_threshold': 0.4,
'crowd_iou_threshold': 0.7,
'use_semantic_segmentation_loss': True,
'torch2trt_backbone': False,
'torch2trt_backbone_int8': False,
'torch2trt_protonet': False,
'torch2trt_protonet_int8': False,
'torch2trt_fpn': False,
'torch2trt_fpn_int8': False,
'torch2trt_prediction_module': False,
'torch2trt_prediction_module_int8': False,
'torch2trt_spa': False,
'torch2trt_spa_int8': False,
'torch2trt_flow_net': False,
'torch2trt_flow_net_int8': False,
'use_tensorrt_safe_mode': False,
})
yolact_edge_config = yolact_base_config.copy({
'name': 'yolact_edge',
'torch2trt_max_calibration_images': 100,
'torch2trt_backbone_int8': True,
'torch2trt_protonet_int8': True,
'torch2trt_fpn': True,
'torch2trt_prediction_module': True,
'use_fast_nms': False
})
bag_610_config = yolact_edge_config.copy({
'name': 'bag_610',
'dataset': bag_610_dataset,
'num_classes': len(bag_610_dataset.class_names) + 1
})
overall_annotation_config_server = yolact_edge_config.copy({
'name': 'overall_annotation_server',
# Dataset stuff
'dataset': overall_annotations_dataset_server,
'num_classes': len(overall_annotations_dataset_server.class_names) + 1,
    'class_names': ('flesh_ripe', 'flesh_unripe'),
    'label_map': {0: 1, 1: 2}
})
overall_annotation_config = yolact_edge_config.copy({
'name': 'overall_annotation',
    'max_size': 200,
# Dataset stuff
'dataset': overall_annotations_dataset,
'num_classes': len(overall_annotations_dataset.class_names) + 1,
    'class_names': ('flesh_ripe', 'flesh_unripe'),
    'label_map': {0: 1, 1: 2}
})
debug_config = yolact_edge_config.copy({
'max_size': 32,
'freeze_bn': True,
'lr': 25e-5,
'dataset': debug_dataset,
'num_classes': len(debug_dataset.class_names) + 1
})
yolact_edge_config_test = yolact_base_config.copy({
'name': 'yolact_edge_test',
'torch2trt_max_calibration_images': 100,
'torch2trt_backbone_int8': True,
'torch2trt_protonet_int8': True,
'torch2trt_fpn': True,
'torch2trt_prediction_module': True,
'use_fast_nms': False
})
yolact_edge_mobilenetv2_config = yolact_edge_config.copy({
'name': 'yolact_edge_mobilenetv2',
'backbone': mobilenetv2_backbone
})
yolact_edge_vid_config = yolact_edge_config.copy({
'name': 'yolact_edge_vid',
'dataset': youtube_vis_dataset.copy({
'joint': 'coco',
'use_all_frames': True,
'images_per_video': 1,
'frame_offset_lb': 2,
'frame_offset_ub': 5,
'frame_offset_multiplier': 1,
'all_frame_direction': 'forward',
}),
'torch2trt_spa': True,
'torch2trt_spa_int8': False,
'torch2trt_flow_net': False,
'torch2trt_flow_net_int8': True,
'joint_dataset': yolact_edge_config.dataset.copy({
'dataset_map': 'ytvis'
}),
'lr': 2e-4,
'lr_warmup_init': 0,
'lr_schedule': 'cosine',
'max_iter': 200000,
'num_classes': len(youtube_vis_dataset.class_names) + 1,
'augment_expand': False,
'flow': flow_base.copy({
'encode_layers': [[1], [2], [4]],
'reduce_channels': [64],
'encode_channels': 64,
'num_groups': 1,
'use_shuffle_cat': False,
'base_backward': True,
'fine_tune_layers': 'flow_net,flow_net_pre_convs,spa,fpn_phase_2,proto_net,prediction_layers,semantic_seg_conv',
'selected_layers': [1, 2],
'warp_mode': 'flow',
'model': 'mini',
'use_pseudo_gt_flow_loss': False,
'feature_matching_loss': 'cosine',
'use_spa': True,
'fm_loss_loc': 'L+P',
})
})
yolact_edge_vid_minimal_config = yolact_edge_vid_config.copy({
'name': 'yolact_edge_vid_minimal',
'torch2trt_spa': False,
'flow': yolact_edge_vid_config.flow.copy({
'fine_tune_layers': 'flow_net,flow_net_pre_convs,fpn_phase_2,proto_net,prediction_layers,semantic_seg_conv',
'use_spa': False,
'feature_matching_loss': None,
})
})
yolact_edge_vid_trainflow_config = yolact_edge_vid_config.copy({
'name': 'yolact_edge_vid_trainflow',
'dataset': flying_chairs_dataset,
'lr': 2e-4,
'max_iter': 400000,
'flow': yolact_edge_vid_config.flow.copy({
'train_flow': True,
'base_backward': False,
'fine_tune_layers': 'flow_net,flow_net_pre_convs'
})
})
yolact_edge_youtubevis_config = yolact_edge_vid_config.copy({
'name': 'yolact_edge_youtubevis',
'dataset': yolact_edge_vid_config.dataset.copy({
'use_all_frames': False,
'images_per_video': 1,
}),
'torch2trt_spa': False,
'torch2trt_flow_net_int8': False,
'lr': 5e-4,
'lr_schedule': 'cosine',
'max_iter': 500000,
'augment_expand': True,
'flow': yolact_edge_vid_config.flow.copy({
'warp_mode': 'none',
'fine_tune_layers': None,
'use_spa': False
})
})
yolact_resnet50_config = yolact_base_config.copy({
'name': 'yolact_resnet50',
'backbone': resnet50_backbone.copy({
'selected_layers': list(range(1, 4)),
'pred_scales': yolact_base_config.backbone.pred_scales,
'pred_aspect_ratios': yolact_base_config.backbone.pred_aspect_ratios,
'use_pixel_scales': True,
'preapply_sqrt': False,
        'use_square_anchors': True, # This is for backward compatibility with a bug
}),
})
yolact_resnet152_config = yolact_base_config.copy({
'name': 'yolact_resnet152',
'backbone': resnet152_backbone.copy({
'selected_layers': list(range(1, 4)),
'pred_scales': yolact_base_config.backbone.pred_scales,
'pred_aspect_ratios': yolact_base_config.backbone.pred_aspect_ratios,
'use_pixel_scales': True,
'preapply_sqrt': False,
        'use_square_anchors': True, # This is for backward compatibility with a bug
}),
})
yolact_edge_resnet50_config = yolact_edge_config.copy({
'name': 'yolact_edge_resnet50',
'backbone': yolact_resnet50_config.backbone
})
yolact_edge_vid_resnet50_config = yolact_edge_vid_config.copy({
'name': 'yolact_edge_vid_resnet50',
'backbone': yolact_resnet50_config.backbone
})
yolact_edge_vid_trainflow_resnet50_config = yolact_edge_vid_trainflow_config.copy({
'name': 'yolact_edge_vid_trainflow_resnet50',
'backbone': yolact_resnet50_config.backbone
})
yolact_edge_youtubevis_resnet50_config = yolact_edge_youtubevis_config.copy({
'name': 'yolact_edge_youtubevis_resnet50',
'backbone': yolact_resnet50_config.backbone
})
# Default config
cfg = yolact_edge_config.copy()
def set_cfg(config_name:str):
""" Sets the active config. Works even if cfg is already imported! """
global cfg
# Note this is not just an eval because I'm lazy, but also because it can
# be used like ssd300_config.copy({'max_size': 400}) for extreme fine-tuning
cfg.replace(eval(config_name))
def set_dataset(dataset_name:str):
""" Sets the dataset of the current config. """
cfg.dataset = eval(dataset_name)
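# Illustrative usage sketch (not part of the original module): how a training or eval
# script might pick a config by name and derive a tweaked copy. The config and dataset
# names below are just examples of objects defined above.
def _example_select_config():
    set_cfg('yolact_edge_resnet50_config')   # swap the globally active config in place
    set_dataset('coco2017_dataset')          # point the active config at another dataset
    return cfg.copy({'max_size': 400})       # derive a modified copy for fine-tuning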
| 36.515829
| 222
| 0.629532
|
88003416b608fa4dc11e671a2e38ef72c5c157da
| 4,262
|
py
|
Python
|
sim/lib/settings/town_settings_sanfrancisco.py
|
cculha4/COVID19Incubator
|
479b5c8d8f6c5069db2ff88578530ba6c84f8369
|
[
"MIT"
] | null | null | null |
sim/lib/settings/town_settings_sanfrancisco.py
|
cculha4/COVID19Incubator
|
479b5c8d8f6c5069db2ff88578530ba6c84f8369
|
[
"MIT"
] | null | null | null |
sim/lib/settings/town_settings_sanfrancisco.py
|
cculha4/COVID19Incubator
|
479b5c8d8f6c5069db2ff88578530ba6c84f8369
|
[
"MIT"
] | null | null | null |
import numpy as np
'''
Settings for town generation
'''
'''
TO DO:
Daily testing capacity vs daily number of tests?
'''
town_name = 'San_Francisco'
# Make sure to download country-specific population density data
# Source: Facebook's Data for Good program
# https://data.humdata.org/dataset/united-states-high-resolution-population-density-maps-demographic-estimates
# Number of people living within 30-meter grid tiles
population_path='lib/data/population/population_density_sf.csv' # Population density of SF extracted from the data (original data has 6 large files)
sites_path='lib/data/queries_sf/' # Directory containing OSM site query details
bbox = (37.7115, 37.8127, -122.5232, -122.3539) # Coordinate bounding box
# Population per age group in the region (matching the RKI age groups)
# Source: safegraph open census data
population_per_age_group = np.array([
38715, # 0-4
59181, # 5-14
30824, # 15-19
52567, # 20-24
329257, # 25-44
167051, # 45-59
136499, # 60-79
36188]) # 80+
town_population = 850282
region_population = population_per_age_group.sum()
# !!!TODO!!!: Daily testing capacity vs daily number of tests?
# Roughly 100k tests per day in Germany: https://www.rki.de/DE/Content/Infekt/EpidBull/Archiv/2020/Ausgaben/15_20.pdf?__blob=publicationFile
# daily_tests_unscaled = int(100000 * town_population / 83000000)
# SF: rough estimate based on the daily number of tests in the past 5 weeks: https://data.sfgov.org/stories/s/d96w-cdge
daily_tests_unscaled = 1200
# Information about household structure (set to None if not available)
# Source for US: https://www.census.gov/data/tables/2019/demo/families/cps-2019.html
household_info = {
'size_dist' : [28.37, 34.51, 15.07, 12.76, 5.78, 2.26, 1.25], # distribution of household sizes (1-7 people) from Table H1
'soc_role' : { # Each element is a probability. Each column should add up to 1. Simplification based on the bureau data
'children' : [1, 1, 1, 0, 0, 0, 0, 0], # age groups 0,1,2 (0-19) can be children (must be in a household with "parents")
'parents' : [0, 0, 0, 1, 1, 1, 0, 0], # age groups 3,4,5 (20-59) can be parents (They do not necessarily have kids)
'elderly' : [0, 0, 0, 0, 0, 0, 1, 1] # age groups 6,7 (60+) are elderly (live in a household of size 1 or 2 without children living with them)
}
}
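# Quick sanity-check sketch (illustrative, not used by the simulation): each age-group
# column of 'soc_role' should sum to 1, i.e. every age group maps to exactly one role.
def _check_household_soc_role(info=household_info):
    roles = info['soc_role']
    for column in zip(roles['children'], roles['parents'], roles['elderly']):
        assert abs(sum(column) - 1.0) < 1e-9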
def foo():
return 3
# proportion of all essential workers within each age group
prop_essential_per_age_group = np.array([
0, # 0-4
0, # 5-14
.01, # 15-19
.08, # 20-24
.45, # 25-44
.25, # 45-59
.20, # 60-79
    0]) # 80+
prop_population_per_age_group = (np.array(population_per_age_group) / float(sum(population_per_age_group)))
# def _essential_prop_per_age_group(prop_essential_total):
# return (prop_essential_per_age_group*prop_essential_total) / prop_population_per_age_group
def _essential_distribution():
ed = np.array([
0, # 0-4
0, # 5-14
0.0125, # 15-19
0.0682, # 20-24
0.4616, # 25-44
0.3889, # 45-59
0.0688, # 60-79
0]) # 80+
return ed
def _worker_mobility():
worker_mob_rate_per_types = [
[5.0, 0.0, 1.16, 2.30, 0.26, 0.5],
[0.0, 5.0, 1.16, 2.30, 0.26, 0.5],
[0.0, 0.0, 5.0, 2.30, 0.26, 0.5],
[0.0, 0.0, 1.16, 5.0, 0.26, 0.5],
[0.0, 0.0, 1.16, 2.30, 5.0, 0.5],
[0.0, 0.0, 1.16, 2.30, 0.26, 5.0] # placeholder. We don't have workers in home gatherings
]
worker_dur_mean_per_types = [
[5.0, 0.1, 0.70, 0.83, 0.55, 3.0],
[0.1, 5.0, 0.70, 0.83, 0.55, 3.0],
[0.1, 0.1, 5.0, 0.83, 0.55, 3.0],
[0.1, 0.1, 0.70, 5.0, 0.55, 3.0],
[0.1, 0.1, 0.70, 0.83, 5.0, 3.0],
[0.1, 0.1, 0.70, 0.83, 0.55, 5.0] # placeholder. We don't have workers in home gatherings
]
worker_variety_per_types = [ # this is not used in simulations
[1, 1, 10, 10, 2, 1],
[1, 1, 10, 10, 2, 1],
[1, 1, 10, 10, 2, 1],
[1, 1, 10, 10, 2, 1],
[1, 1, 10, 10, 2, 1],
[1, 1, 10, 10, 2, 1]
]
return worker_mob_rate_per_types, worker_dur_mean_per_types, worker_variety_per_types
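# Illustrative consistency check (not called anywhere): the three matrices returned by
# _worker_mobility() are expected to be square, 6x6, with one row/column per site type.
def _check_worker_matrices():
    for matrix in _worker_mobility():
        assert len(matrix) == 6 and all(len(row) == 6 for row in matrix)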
| 34.096
| 150
| 0.623416
|
390d5bfed4121dba896d9cb60bb3e108176a2c88
| 4,245
|
py
|
Python
|
Pi_Files/cap_10hz_30.py
|
Zach-Switzer/Capacitive-PPU
|
3e781c5b4638d7638d78b17f5eee358d65d3ffe7
|
[
"MIT"
] | null | null | null |
Pi_Files/cap_10hz_30.py
|
Zach-Switzer/Capacitive-PPU
|
3e781c5b4638d7638d78b17f5eee358d65d3ffe7
|
[
"MIT"
] | null | null | null |
Pi_Files/cap_10hz_30.py
|
Zach-Switzer/Capacitive-PPU
|
3e781c5b4638d7638d78b17f5eee358d65d3ffe7
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------#
import timeit
import time # Use for time calls
from subprocess import call # Use for turning off the Pi
import sys, select # Use for timed user input
import os
start=timeit.default_timer()
# Creating the function generator
os.chdir("/home/pi/PiBits/ServoBlaster/user") # changing the directory to access ServoBlaster
call("sudo ./servod --cycle-time=1200us --max=100% --min=0us", shell=True) # $
call("pwd", shell=True) # printing the current directory to make sure we've cha$
time.sleep(0.1)
ServoBlaster = open('/dev/servoblaster', 'w') # opening servoblaster
count = 1
T1=timeit.default_timer()+30
while (timeit.default_timer()<T1):
# Turn on the IGBT to charge the inductor
for i in range (0,8):
        ServoBlaster.write('P1-12=600us' + '\n') # pulse width of 600us
ServoBlaster.flush()
#ServoBlaster.write('P1-15=0%' + '\n') # pulse width of 200us
#ServoBlaster.flush()
print('Inductor pulsing!')
time.sleep(.0012)
ServoBlaster.write('P1-12=0%' + '\n')
ServoBlaster.flush()
#print('turning off pin 12')
time.sleep(.0001)
print(timeit.default_timer()-start)
# Release the capacitors
start1=timeit.default_timer()
ServoBlaster.write('P1-11=100%' + '\n')
ServoBlaster.flush()
print(timeit.default_timer()-start1)
print(timeit.default_timer()-start)
start2=timeit.default_timer()
time.sleep(.005)
ServoBlaster.write('P1-15=100%' + '\n')
ServoBlaster.flush()
print(timeit.default_timer()-start2)
print(timeit.default_timer()-start1)
print(timeit.default_timer()-start)
start3=timeit.default_timer()
time.sleep(.005)
ServoBlaster.write('P1-16=100%' + '\n')
ServoBlaster.flush()
print(timeit.default_timer()-start3)
print(timeit.default_timer()-start1)
print(timeit.default_timer()-start)
start4=timeit.default_timer()
time.sleep(.005)
ServoBlaster.write('P1-18=100%' + '\n')
ServoBlaster.flush()
print(timeit.default_timer()-start4)
print(timeit.default_timer()-start1)
print(timeit.default_timer()-start)
start5=timeit.default_timer()
time.sleep(.0001)
print(timeit.default_timer()-start5)
time.sleep(.0834)
# Close the capacitors
ServoBlaster.write('P1-11=0%' + '\n')
ServoBlaster.flush()
#time.sleep(0.01)
ServoBlaster.write('P1-15=0%' + '\n')
ServoBlaster.flush()
#time.sleep(0.01)
ServoBlaster.write('P1-16=0%' + '\n')
ServoBlaster.flush()
#time.sleep(0.01)
ServoBlaster.write('P1-18=0%' + '\n')
ServoBlaster.flush()
time.sleep(.0002)
#print('kill the loop now: 5 seconds remain')
#print('5')
#time.sleep(1)
#print('4')
#time.sleep(1)
#print('3')
#time.sleep(1)
#print('2')
#time.sleep(1)
#print('1')
#time.sleep(1)
#print('0')
#count = count+1
#print('iteration number: '+str(count))
print('we out!!!')
# Release the capacitors
start1=timeit.default_timer()
ServoBlaster.write('P1-11=100%' + '\n')
ServoBlaster.flush()
print(timeit.default_timer()-start1)
print(timeit.default_timer()-start)
start2=timeit.default_timer()
time.sleep(.005)
ServoBlaster.write('P1-15=100%' + '\n')
ServoBlaster.flush()
print(timeit.default_timer()-start2)
print(timeit.default_timer()-start1)
print(timeit.default_timer()-start)
start3=timeit.default_timer()
time.sleep(.005)
ServoBlaster.write('P1-16=100%' + '\n')
ServoBlaster.flush()
print(timeit.default_timer()-start3)
print(timeit.default_timer()-start1)
print(timeit.default_timer()-start)
start4=timeit.default_timer()
time.sleep(.005)
ServoBlaster.write('P1-18=100%' + '\n')
ServoBlaster.flush()
print(timeit.default_timer()-start4)
print(timeit.default_timer()-start1)
print(timeit.default_timer()-start)
start5=timeit.default_timer()
time.sleep(0.005)
print(timeit.default_timer()-start5)
# Close the capacitors
ServoBlaster.write('P1-11=0%' + '\n')
ServoBlaster.flush()
#time.sleep(0.01)
ServoBlaster.write('P1-15=0%' + '\n')
ServoBlaster.flush()
#time.sleep(0.01)
ServoBlaster.write('P1-16=0%' + '\n')
ServoBlaster.flush()
#time.sleep(0.01)
ServoBlaster.write('P1-18=0%' + '\n')
ServoBlaster.flush()
#time.sleep(0.1)
| 29.894366
| 80
| 0.667373
|
d283c537aa9eebd0c0b9ff083e3fedefaedcf8da
| 816
|
py
|
Python
|
src/logger.py
|
dlotnyk/movieorg
|
096fc2014b8877bba9930c26f79186797a0c7856
|
[
"MIT"
] | null | null | null |
src/logger.py
|
dlotnyk/movieorg
|
096fc2014b8877bba9930c26f79186797a0c7856
|
[
"MIT"
] | null | null | null |
src/logger.py
|
dlotnyk/movieorg
|
096fc2014b8877bba9930c26f79186797a0c7856
|
[
"MIT"
] | null | null | null |
import logging
from logging.handlers import RotatingFileHandler
def log_settings():
# Logger definitions
log_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - line: %(lineno)d - %(message)s')
logFile = "app_calc.log"
my_handler = RotatingFileHandler(logFile, mode="a", maxBytes=20*1024*1024, backupCount=2, encoding=None, delay=False)
my_handler.setFormatter(log_formatter)
my_handler.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
console_handler.setLevel(logging.DEBUG)
app_log = logging.getLogger("AbDatabase")
app_log.setLevel(logging.DEBUG)
if len(app_log.handlers) < 2:
app_log.addHandler(my_handler)
app_log.addHandler(console_handler)
return app_log
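# Minimal usage sketch (illustrative): obtain the configured logger once and log through
# it. INFO and above go to the rotating app_calc.log file; DEBUG only reaches the console.
if __name__ == "__main__":
    app_log = log_settings()
    app_log.info("logger configured")
    app_log.debug("console-only debug message")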
| 40.8
| 121
| 0.737745
|
805717c9740fc0957a803f4a70b2c656345830c4
| 13,863
|
py
|
Python
|
tests/test_rsa_key.py
|
tomwei7/libtrust-py3
|
b1d71eee57b95621b5111cebd3c44751442740c5
|
[
"Apache-2.0"
] | 1
|
2020-03-26T13:17:10.000Z
|
2020-03-26T13:17:10.000Z
|
tests/test_rsa_key.py
|
tomwei7/libtrust-py3
|
b1d71eee57b95621b5111cebd3c44751442740c5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rsa_key.py
|
tomwei7/libtrust-py3
|
b1d71eee57b95621b5111cebd3c44751442740c5
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import json
import unittest
from libtrust import hash as hash_
from libtrust import rsa_key
from tests import fixtures_path
class RSAKeyTest(unittest.TestCase):
def setUp(self):
with open(fixtures_path('private.pem'), 'rb') as f:
self.private_key = rsa_key.RSAPrivateKey.from_pem(f.read())
with open(fixtures_path('public.pem'), 'rb') as f:
self.public_key = rsa_key.RSAPublicKey.from_pem(f.read())
def test_to_pem(self):
self.private_key.pem_block()
self.public_key.pem_block()
def test_key_id(self):
pub_key_key_id = self.public_key.key_id()
priv_key_id = self.private_key.key_id()
self.assertEqual('IIYO:OWAZ:MBMG:2SIK:IK2I:OP5Z:H6QR:KN6Y:QUGO:BUWN:TYW3:JXVW', pub_key_key_id)
self.assertEqual(pub_key_key_id, priv_key_id)
def test_marshal_json(self):
pub_key_json = self.public_key.marshal_json()
priv_key_json = self.private_key.marshal_json()
pub_key_json_origin = r"""{"e":"AQAB","kid":"IIYO:OWAZ:MBMG:2SIK:IK2I:OP5Z:H6QR:KN6Y:QUGO:BUWN:TYW3:JXVW","kty":"RSA","n":"wq1mCmgn460MC6MnCqranQNTgmKuKPl7bNH7Qc6hBDGHlnIjU6q_h2KXF37TC5Y9tsKvQ4b8jd0Sf0dXFHml8qunSvNnqsSvoD8tSPUKqXS6jrlbGSQXhya7BL1RPGccD5K1xrV73QlI6uFPd3APRQYij5EOB8IOWEQujJk_8Mjc0EC9zvk5TUJb59hkOUPZ3CkvSBNLNS8wpQI98FRnIzHjuaNicqve8054oxmDKifHWy0nnF135cXW8zkH3Zto1q89zD2g-zcVxLcRP84Uhe0nSQyg7vEYl4Wl74Eo6_89qL2yE0mEiQN245ACA5B8WFV_t3j_OD3ydOCaAOg28vQtzcZ1gh2Ev4RxeR7bKq58g-R0-MMwl7nnW29mbCkcgdVVR4YPmglP7Vb6w7_NbqFhnxx4E3A05AeevHdMdYCrtgQwogvIhdOHLcVQxJgwy1d2Lg_mv9rovhCJ7d3XaNEYym6CplCHPMtfnU1LCVkA6b44pFaOVjsAQ8FviFtGXQAToRtwoszSarzslHKYdPoSGFOsgNJgW67iViOYqGPD97rgJA0VPm0POMNHGw_R6o-08KhDF_OI1EDckmjXhUggY_WCqWDxD77Ezd_wr9Zlbv_uSIEL9ifvBLq06lLcXMLrQbJrwMbDrngMZMAcUkTzThmtxNs4uwu45R-zfKc"}"""
priv_key_json_origin = r"""{"d":"tu9aQ808LqYd-5GEznFenMzTXGJ-ZeKKKOfowx34EIi6cJUwvR5mfEIY2OtERk8YDvVC3KGsEWL8Tr4rBgKJ_k9vFO9FKyNIJb04QKaDLlmSNSvYfvd7ZHTwqLN98tSxebDTP7aqfjqLWqv-kK2sq5_oOiCEnqWr9SWc2GHpw8n8NXWg5y0qu37v_h1JkMZBorDQzVnUAlYlz-kbawrlIB1xcLAngroe92N12U3QA3z9yJ_V6Qmr8S7HniapTYUMLzDdUV9YNri8q-2bN-nfPzprACnt0JqeEUR1eWpVme5vcnFPNPCQqm-m-JAKVG8haaBuM2pv6dnMTCgCj3emqWLVfBoc3qmi1KJT_dG54GRepIyN82jFDByKqQGMMO5_Chf2DlRYQYBrkPI5hIZLvbU-a1K5Uf1wauNpGgiGCEjxiXsYGUPyCjAgMmNwnNjfOO7U5KQQMV1PbEj1iPU0xw_Q7adqKd4UeD_rwaTo00KcH6K7_1pFZP3UrkcQ5de9nI_jULIF7YCPqZxs5_dpK8HGwF5VroYIjyVm5AVh9xaE3sugxf8nsdopLybIwcR7nk2RCibW7ClbsJd7eTrYiuPBI50Lb3I-CLczo6VgvlnnqhVDs_kYDZA9c4j11ayAW7l4zc47cPjK6M-ggvL4zqc2n7Ba0Z2Med07hiNrHwE","dp":"MUMOUuBHrByzXNNEsKXFTOOFvOt_eVSorlL2KTcQQjHTSoxv7jY1yqfx41qNNRn6rlcjwGf3_GLuN5bq8zHX37vDD2O5uQDbDmGZc4W0X4E7ZDAY7UTMi_DONzf7Pu-8pN7mBneeLSuUoL-lduNLzC0b-0kOLHG7WCGA5Y9wJT8_fz9h25Yf8BmCe3peuDMwT5E-RHlnk4epQFno_bVz7ZVawE9EpE7FY3l34JOSKrh0hIIz_w1QmFt1fabSfrueM3igaibrc5DyeRmAT0xtLQUUbuzXvycmU-S6VqOwQET6LEVsCaZKGwzRqXXwSTIsyAdNHTg1Oyqdu1jsxt3u8Q","dq":"fKcxyUxSlDYOBky14bORjN3fEujFMgZd4cdIWIyaCzgWMPZIMAJKRfTguH76Msg2rZQ5sIuUetXfBdF7o7k7Zndl_inNuirRclb3Ggty7wfVddk-qIfbGwX_rsqD_H4hnFj2ARBSZO6MAua99ZWERqHpi50vqwBG-iftm9MDEbyp3qUixqHH765bXcOnm2abHOwD_F_Oj1QXECdh76OteZ_13Gz7e9dz9xn2QeEs4_Sg84LUCTDcfy42uGRMx2kKHzUEGh120tqLY5X0sRG8wgUgA4e-By-yAXODjCbApfxFCz1ObCVwJqXmCk66nTp1n1X2du11ht4SWouOooM83Q","e":"AQAB","kid":"IIYO:OWAZ:MBMG:2SIK:IK2I:OP5Z:H6QR:KN6Y:QUGO:BUWN:TYW3:JXVW","kty":"RSA","n":"wq1mCmgn460MC6MnCqranQNTgmKuKPl7bNH7Qc6hBDGHlnIjU6q_h2KXF37TC5Y9tsKvQ4b8jd0Sf0dXFHml8qunSvNnqsSvoD8tSPUKqXS6jrlbGSQXhya7BL1RPGccD5K1xrV73QlI6uFPd3APRQYij5EOB8IOWEQujJk_8Mjc0EC9zvk5TUJb59hkOUPZ3CkvSBNLNS8wpQI98FRnIzHjuaNicqve8054oxmDKifHWy0nnF135cXW8zkH3Zto1q89zD2g-zcVxLcRP84Uhe0nSQyg7vEYl4Wl74Eo6_89qL2yE0mEiQN245ACA5B8WFV_t3j_OD3ydOCaAOg28vQtzcZ1gh2Ev4RxeR7bKq58g-R0-MMwl7nnW29mbCkcgdVVR4YPmglP7Vb6w7_NbqFhnxx4E3A05AeevHdMdYCrtgQwogvIhdOHLcVQxJgwy1d2Lg_mv9rovhCJ7d3XaNEYym6CplCHPMtfnU1LCVkA6b44pFaOVjsAQ8FviFtGXQAToRtwoszSarzslHKYdPoSGFOsgNJgW67iViOYqGPD97rgJA0VPm0POMNHGw_R6o-08KhDF_OI1EDckmjXhUggY_WCqWDxD77Ezd_wr9Zlbv_uSIEL9ifvBLq06lLcXMLrQbJrwMbDrngMZMAcUkTzThmtxNs4uwu45R-zfKc","p":"6ql31IHaMnhXtM0Bv13awqXzujVMfdzVEpBA1NEGdiiEaroLlfpX6tHmrlJnYEJjEk5pwXldTcu5fOBgphEZ985VR8O7nOhxtsYNt0RJe_34SSeUeZNK2kCSB-4uy8TSICUepzL1e59sj7pzHXNz6fxbG-SjGk8dUphgS1QsmJGsFw1hcjYs_yvGfAPpRb0Y8qs4yM3yeKLKq-qW3IFjiAsrrw7w2IEXmUQgyXhdv9DySSPr7WrDL2AV5yV6WouloDABPhlV-ZtCsIvN_5Eu9GTN8kEZ3TiOg7j2IFcP0MKanPbQT4ivYXIAfS7Pt2cVlpwGfCxBtaRk425rGZ3viQ","q":"1GEn69ysWOsGN-U_R9ifaK7RsrfssRsjr7FE_KpWfXqBaUaKLYL6DUc3J4RIr9IP4MQw9gMl6TOXI16G-d6jXtsKODIFHGvkWTxs_iPkOWnBPTTHgklCIdev9MfVxctN-dB_UI6ayEZ_mooWoUQYaZEEwxHRY1Xn0VpWJMSCGfQf3FJEoI3u5HnWaz_HjrCDbJ7u0ZyXJdUn_-IXjZFDsTz_lwaq_dmVsW2KcOVASKHIBGQXdAGwN6fe7XO3MJvLt9oB4mGgCKJgON8IJx9OF0CplF66QmduIAIptnYzwYbeTtcU1y0IAApOj0dj3jmnOzBXnYdbZ7L3mu7glnlOrw","qi":"Qh5chO-_2sdaREPCXOGXMMmw2Ajci-rKAE9HFWaWXSRaduf_P_tPaBV4V4wnFsw6MYaZECgK1wW-u2FpYWdCWF1AyeSIx7egATmfwdpHDPF9ebjSneem9KNhrPPc0MXmYR--cPAVhgtvTq4IV-x32kDUWJQN6VTgvwWmFjL7lrxiq30_TyYopi2pqinnFRGuMj8gWDRi_YmOwrii49t6mmteYS48R8H59DqazVqXIMMc2nIUt9LZs5QGzpoYkyBsMdCIFmJV5mHFeCxVD6S5-3rvD_fCSiCRT94YQbYdHkKQnn3JpUmnpIwraBxNJErjRs-PRTAzMSAts-im_bCAXQ"}"""
self.assertEqual(pub_key_json_origin, pub_key_json)
self.assertEqual(priv_key_json_origin, priv_key_json)
def test_from_jwk(self):
pub_key_json = self.public_key.marshal_json()
pub_key = rsa_key.rsa_public_key_from_map(json.loads(pub_key_json))
self.assertEqual(pub_key_json, pub_key.marshal_json())
def test_sign(self):
message = StringIO('Hello, World!'.encode('utf-8'))
sig_algs = (hash_.RS256, hash_.RS384, hash_.RS512)
origin_sig = (
[47, 53, 27, 154, 98, 40, 87, 246, 73, 49, 80, 241, 186, 20, 180, 75, 78, 152, 83, 140, 12, 163, 134, 214, 100, 92,
80, 104, 65, 36, 88, 234, 166, 131, 135, 85, 242, 96, 111, 191, 36, 177, 18, 245, 217, 173, 194, 139, 87, 220, 104,
213, 79, 205, 31, 137, 79, 137, 147, 45, 127, 139, 137, 234, 161, 175, 64, 21, 215, 232, 237, 138, 115, 212, 216,
219, 100, 104, 189, 113, 59, 169, 99, 43, 227, 122, 155, 51, 250, 244, 53, 247, 99, 249, 174, 72, 175, 131, 122, 166,
198, 148, 48, 54, 71, 15, 210, 18, 156, 57, 34, 107, 74, 76, 224, 62, 227, 228, 208, 139, 153, 252, 142, 37, 73, 54,
163, 165, 230, 12, 37, 54, 188, 147, 82, 239, 96, 56, 71, 10, 199, 180, 44, 213, 111, 101, 163, 246, 162, 239, 105,
2, 46, 121, 142, 153, 6, 90, 161, 254, 244, 52, 168, 82, 215, 181, 9, 237, 84, 116, 131, 38, 145, 126, 148, 44, 170,
119, 2, 9, 26, 184, 7, 86, 93, 22, 129, 63, 211, 196, 92, 219, 164, 168, 76, 76, 78, 1, 244, 172, 142, 134, 162, 75,
253, 236, 138, 193, 182, 16, 224, 2, 109, 2, 62, 40, 173, 30, 205, 99, 97, 189, 245, 136, 84, 196, 172, 52, 151, 208,
101, 228, 184, 90, 208, 73, 202, 81, 6, 22, 134, 141, 124, 186, 110, 227, 68, 145, 253, 244, 2, 154, 242, 33, 147,
115, 206, 138, 102, 88, 223, 184, 2, 193, 56, 170, 9, 5, 116, 22, 205, 36, 152, 51, 196, 35, 19, 54, 2, 23, 93, 120,
215, 107, 137, 79, 79, 186, 151, 186, 252, 146, 100, 47, 217, 232, 197, 218, 164, 16, 208, 37, 123, 126, 158, 103,
221, 111, 92, 24, 172, 223, 219, 136, 196, 20, 91, 163, 152, 195, 97, 155, 237, 11, 143, 98, 74, 30, 182, 186, 255,
24, 212, 138, 252, 41, 121, 169, 166, 125, 108, 116, 15, 71, 175, 241, 238, 22, 163, 149, 184, 244, 99, 193, 77, 242,
201, 20, 133, 41, 32, 26, 112, 48, 250, 148, 117, 80, 69, 179, 119, 202, 250, 204, 151, 196, 94, 25, 191, 40, 173,
60, 116, 234, 159, 37, 59, 43, 223, 253, 98, 31, 103, 243, 140, 150, 132, 252, 244, 88, 69, 158, 56, 86, 57, 58, 189,
80, 164, 213, 93, 169, 112, 231, 153, 150, 37, 185, 153, 94, 2, 104, 146, 146, 141, 80, 104, 129, 37, 74, 184, 8,
179, 228, 59, 79, 156, 19, 47, 193, 13, 238, 187, 220, 133, 176, 150, 13, 140, 162, 84, 217, 248, 66, 101, 206, 203,
8, 218, 106, 97, 102, 194, 106, 56, 86, 40, 64, 183, 16, 94, 127, 232, 119, 69, 56, 44, 182, 215, 34, 124, 167, 42,
125, 8, 172, 19, 144, 143, 166, 145, 24, 18, 167, 9, 231, 227, 83, 29, 149, 174, 184, 195, 106, 38, 97, 197, 175,
206, 155, 172, 157],
[78, 220, 151, 16, 42, 6, 220, 1, 70, 30, 1, 181, 74, 193, 140, 54, 28, 26, 140, 60, 153, 128, 54, 68, 202, 42, 218,
127, 230, 140, 60, 120, 92, 229, 32, 15, 178, 123, 253, 132, 100, 54, 96, 6, 30, 148, 5, 168, 106, 48, 88, 244, 134,
192, 189, 225, 67, 96, 8, 210, 8, 12, 135, 250, 172, 255, 113, 1, 2, 126, 25, 173, 76, 96, 193, 165, 217, 109, 229,
15, 96, 200, 68, 42, 167, 164, 224, 84, 210, 4, 180, 56, 104, 245, 119, 24, 16, 31, 235, 1, 150, 181, 25, 11, 201,
29, 48, 206, 223, 54, 191, 246, 29, 127, 86, 137, 136, 84, 140, 172, 51, 240, 95, 156, 41, 245, 86, 215, 92, 50, 237,
74, 211, 31, 85, 41, 14, 142, 128, 213, 229, 29, 224, 163, 252, 102, 9, 148, 216, 128, 190, 143, 150, 208, 12, 231,
81, 105, 167, 161, 192, 65, 98, 28, 248, 215, 193, 167, 48, 196, 80, 156, 114, 134, 216, 231, 95, 232, 47, 117, 40,
110, 39, 247, 53, 61, 201, 216, 47, 149, 153, 39, 246, 86, 255, 79, 134, 55, 254, 187, 111, 235, 87, 44, 55, 85, 108,
144, 36, 137, 201, 43, 145, 216, 30, 221, 18, 101, 128, 105, 162, 50, 20, 92, 42, 121, 142, 232, 159, 20, 37, 136,
64, 160, 21, 216, 201, 49, 146, 43, 22, 92, 169, 162, 189, 7, 218, 50, 235, 246, 238, 212, 102, 153, 38, 218, 194, 4,
103, 168, 53, 50, 148, 94, 120, 216, 134, 122, 45, 40, 170, 27, 154, 248, 162, 18, 147, 182, 138, 209, 1, 50, 114,
182, 215, 132, 104, 186, 58, 97, 0, 163, 249, 105, 170, 254, 76, 26, 161, 247, 51, 195, 4, 151, 230, 32, 253, 120,
48, 155, 74, 168, 158, 222, 142, 17, 253, 62, 68, 46, 69, 145, 204, 188, 41, 194, 184, 210, 211, 146, 228, 116, 143,
239, 131, 203, 63, 89, 234, 129, 29, 122, 48, 131, 8, 103, 36, 110, 9, 126, 30, 85, 211, 153, 170, 125, 79, 29, 244,
213, 121, 12, 144, 142, 182, 165, 179, 198, 245, 86, 173, 0, 96, 189, 195, 129, 39, 37, 60, 13, 98, 112, 222, 134,
153, 12, 10, 194, 223, 166, 232, 122, 0, 162, 80, 35, 164, 253, 34, 19, 237, 177, 229, 141, 227, 166, 108, 183, 49,
246, 204, 17, 45, 218, 30, 73, 162, 189, 167, 204, 142, 68, 3, 194, 213, 38, 79, 194, 55, 195, 29, 192, 99, 135, 72,
24, 215, 8, 155, 97, 88, 9, 185, 187, 236, 217, 34, 156, 28, 111, 221, 209, 110, 163, 20, 90, 163, 251, 15, 40, 19,
226, 233, 115, 243, 36, 96, 180, 122, 90, 191, 203, 34, 32, 106, 34, 239, 24, 17, 89, 36, 221, 190, 246, 225, 141,
212, 200, 15, 11, 192, 11, 105, 83, 138, 98, 64, 177, 1, 71, 67, 105, 239, 164, 161, 123, 92, 21, 67, 51, 177, 161],
[47, 17, 171, 73, 252, 150, 144, 127, 249, 242, 4, 175, 7, 192, 226, 130, 145, 236, 156, 65, 61, 231, 21, 197, 174,
141, 59, 93, 13, 51, 155, 30, 3, 153, 0, 68, 220, 36, 252, 141, 0, 208, 226, 92, 71, 16, 159, 46, 3, 61, 144, 110,
103, 38, 85, 131, 45, 31, 219, 8, 27, 117, 72, 101, 124, 60, 44, 105, 194, 104, 183, 214, 101, 180, 235, 72, 144,
230, 109, 103, 55, 215, 67, 189, 183, 9, 48, 206, 49, 211, 39, 118, 80, 192, 141, 48, 226, 250, 118, 255, 236, 163,
20, 207, 213, 158, 5, 12, 200, 163, 201, 51, 253, 34, 91, 75, 41, 30, 67, 48, 161, 75, 44, 70, 45, 31, 76, 179, 171,
136, 202, 20, 200, 227, 2, 18, 98, 197, 93, 13, 121, 181, 59, 92, 16, 204, 27, 123, 29, 43, 37, 246, 236, 43, 40,
173, 216, 255, 181, 85, 117, 193, 200, 208, 208, 171, 95, 103, 175, 188, 120, 159, 201, 142, 160, 4, 200, 14, 219,
128, 142, 70, 147, 229, 175, 39, 46, 142, 66, 98, 164, 103, 239, 197, 108, 28, 202, 27, 210, 63, 118, 127, 178, 137,
77, 209, 208, 34, 84, 56, 197, 181, 80, 243, 186, 132, 96, 20, 251, 28, 151, 179, 6, 140, 184, 204, 121, 89, 227, 51,
225, 175, 160, 188, 157, 253, 72, 184, 241, 225, 210, 231, 82, 35, 139, 228, 177, 51, 178, 49, 101, 181, 196, 141,
98, 55, 192, 210, 193, 224, 35, 113, 233, 219, 93, 185, 205, 173, 86, 128, 51, 149, 206, 161, 104, 67, 191, 146, 46,
219, 213, 67, 144, 254, 101, 63, 171, 65, 215, 203, 10, 19, 112, 4, 104, 11, 162, 132, 247, 157, 141, 103, 231, 133,
98, 127, 116, 97, 250, 170, 130, 79, 214, 239, 242, 169, 33, 114, 218, 76, 184, 46, 12, 64, 104, 236, 61, 238, 159,
163, 36, 33, 170, 168, 77, 25, 103, 238, 63, 84, 203, 11, 214, 148, 61, 181, 205, 72, 87, 229, 46, 207, 119, 173,
215, 187, 153, 193, 227, 212, 8, 182, 28, 153, 25, 33, 234, 78, 57, 20, 242, 28, 131, 234, 232, 26, 155, 215, 41, 89,
209, 7, 103, 241, 47, 226, 155, 12, 135, 152, 93, 92, 243, 38, 150, 45, 114, 252, 120, 126, 25, 131, 173, 89, 84,
208, 117, 186, 252, 168, 134, 128, 205, 203, 176, 29, 203, 142, 218, 61, 67, 126, 182, 66, 157, 248, 246, 246, 189,
233, 127, 67, 249, 158, 218, 83, 239, 52, 211, 201, 162, 101, 113, 1, 220, 113, 251, 102, 213, 22, 241, 63, 201, 193,
62, 98, 156, 119, 144, 98, 22, 40, 255, 158, 224, 236, 248, 170, 206, 186, 231, 11, 205, 167, 107, 33, 4, 151, 95,
212, 39, 128, 6, 140, 99, 131, 114, 219, 65, 198, 12, 46, 169, 236, 123, 64, 105, 76, 59, 233, 250, 249, 82, 201,
174, 137, 79, 123, 111, 191, 241, 39]
)
for i, sa in enumerate(sig_algs):
message.seek(0)
sig, alg = self.private_key.sign(message, sa.hash_id)
message.seek(0)
self.assertTrue(self.public_key.verify(message, alg, sig))
self.assertEqual(b''.join([bytes([c]) for c in origin_sig[i]]), sig)
| 114.570248
| 3,258
| 0.661257
|
fd0fc9b821649cec6511074d95de91287a973a85
| 2,168
|
py
|
Python
|
src/Lib/site-packages/pygame/Surface.py
|
litie/brython
|
05cb92912a2c7fd2b393881c271471f39c01fec2
|
[
"BSD-3-Clause"
] | 1
|
2019-12-18T04:58:34.000Z
|
2019-12-18T04:58:34.000Z
|
src/Lib/site-packages/pygame/Surface.py
|
litie/brython
|
05cb92912a2c7fd2b393881c271471f39c01fec2
|
[
"BSD-3-Clause"
] | null | null | null |
src/Lib/site-packages/pygame/Surface.py
|
litie/brython
|
05cb92912a2c7fd2b393881c271471f39c01fec2
|
[
"BSD-3-Clause"
] | null | null | null |
from browser import document, html, window
from javascript import console
import pygame.Rect
class Surface:
def __init__(self, dim, depth=16):
self._width=dim[0]
self._height=dim[1]
self._depth=depth
self._canvas=html.CANVAS(width=self._width, height=self._height)
self._context=self._canvas.getContext('2d')
document['py_div'] <= self._canvas
def blit(self, source, dest, area=None, special_flags=0):
if area is None:
self._context.drawImage(source, dest[0], dest[1])
return source.width, source.height
_ctx=source.getContext('2d')
        _subset=_ctx.getImageData(area[0][0], area[0][1], area[1][0], area[1][1])
# we want just a subset of the source image copied
self._context.drawImage(_subset, dest[0], dest[1])
return _subset.width, _subset.height
def convert(self, surface):
## fix me...
return surface
def copy(self):
        _canvas=html.CANVAS(width=self._canvas.width, height=self._canvas.height)
        _ctx=_canvas.getContext('2d')
        _ctx.drawImage(self._canvas, 0, 0)  # a canvas can be drawn directly onto another canvas
return _canvas
def fill(self, color):
""" fill canvas with this color """
self._context.rect(0,0,self._width,self._height)
self._context.fillStyle="rgb(%s,%s,%s)" % color
self._context.fill()
def get_height(self):
return int(self._canvas.height)
def get_width(self):
return int(self._canvas.width)
def scroll(self, dx=0, dy=0):
        # redraw the current canvas content onto itself, offset by (dx, dy)
        self._context.drawImage(self._canvas, dx, dy)
def get_at(self, pos):
        # returns the RGBA pixel data at pos
return self._context.getImageData(pos[0], pos[1],1,1).data
def set_at(self, pos, color):
self._context.fillStyle='rgb(%s,%s,%s)' % color
        self._context.fillRect(pos[0], pos[1], 1, 1)
def get_size(self):
return self._canvas.width, self._canvas.height
def get_rect(self):
return pygame.Rect(0, 0, self._canvas.width, self._canvas.height)
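# Illustrative usage sketch (assumes a Brython page containing a 'py_div' element, since
# the constructor above appends its canvas there):
def _example_usage():
    surf = Surface((320, 240))
    surf.fill((0, 0, 0))                  # black background
    surf.set_at((10, 10), (255, 0, 0))    # draw a single red pixel
    return surf.get_at((10, 10))          # RGBA pixel data at that position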
| 28.906667
| 79
| 0.654982
|
cfb9b474b9f3ddafffd994ebfbbca2a5fcb7bd1b
| 12,753
|
py
|
Python
|
src/ramstk/models/programdb/similar_item/record.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 26
|
2019-05-15T02:03:47.000Z
|
2022-02-21T07:28:11.000Z
|
src/ramstk/models/programdb/similar_item/record.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 815
|
2019-05-10T12:31:52.000Z
|
2022-03-31T12:56:26.000Z
|
src/ramstk/models/programdb/similar_item/record.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 9
|
2019-04-20T23:06:29.000Z
|
2022-01-24T21:21:04.000Z
|
# pylint: disable=duplicate-code
# -*- coding: utf-8 -*-
#
# ramstk.models.similar_item.record.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Similar Item Record Model."""
# Third Party Imports
# noinspection PyPackageRequirements
from sqlalchemy import Column, Float, ForeignKey, Integer, String
# RAMSTK Package Imports
from ramstk.db import RAMSTK_BASE
from ramstk.models import RAMSTKBaseRecord
# pylint: disable=R0902
class RAMSTKSimilarItemRecord(RAMSTK_BASE, RAMSTKBaseRecord):
"""Class to represent ramstk_similar_item table in RAMSTK Program database.
This table shares a Many-to-One relationship with ramstk_hardware.
"""
__defaults__ = {
"change_description_1": "",
"change_description_2": "",
"change_description_3": "",
"change_description_4": "",
"change_description_5": "",
"change_description_6": "",
"change_description_7": "",
"change_description_8": "",
"change_description_9": "",
"change_description_10": "",
"change_factor_1": 1.0,
"change_factor_2": 1.0,
"change_factor_3": 1.0,
"change_factor_4": 1.0,
"change_factor_5": 1.0,
"change_factor_6": 1.0,
"change_factor_7": 1.0,
"change_factor_8": 1.0,
"change_factor_9": 1.0,
"change_factor_10": 1.0,
"environment_from_id": 0,
"environment_to_id": 0,
"function_1": "0",
"function_2": "0",
"function_3": "0",
"function_4": "0",
"function_5": "0",
"similar_item_method_id": 1,
"parent_id": 0,
"quality_from_id": 0,
"quality_to_id": 0,
"result_1": 0.0,
"result_2": 0.0,
"result_3": 0.0,
"result_4": 0.0,
"result_5": 0.0,
"temperature_from": 30.0,
"temperature_to": 30.0,
"user_blob_1": "",
"user_blob_2": "",
"user_blob_3": "",
"user_blob_4": "",
"user_blob_5": "",
"user_float_1": 0.0,
"user_float_2": 0.0,
"user_float_3": 0.0,
"user_float_4": 0.0,
"user_float_5": 0.0,
"user_int_1": 0,
"user_int_2": 0,
"user_int_3": 0,
"user_int_4": 0,
"user_int_5": 0,
}
__tablename__ = "ramstk_similar_item"
__table_args__ = {"extend_existing": True}
revision_id = Column(
"fld_revision_id",
Integer,
ForeignKey("ramstk_revision.fld_revision_id", ondelete="CASCADE"),
nullable=False,
)
hardware_id = Column(
"fld_hardware_id",
Integer,
ForeignKey("ramstk_hardware.fld_hardware_id", ondelete="CASCADE"),
primary_key=True,
nullable=False,
)
change_description_1 = Column(
"fld_change_description_1", String, default=__defaults__["change_description_1"]
)
change_description_2 = Column(
"fld_change_description_2", String, default=__defaults__["change_description_2"]
)
change_description_3 = Column(
"fld_change_description_3", String, default=__defaults__["change_description_3"]
)
change_description_4 = Column(
"fld_change_description_4", String, default=__defaults__["change_description_4"]
)
change_description_5 = Column(
"fld_change_description_5", String, default=__defaults__["change_description_5"]
)
change_description_6 = Column(
"fld_change_description_6", String, default=__defaults__["change_description_6"]
)
change_description_7 = Column(
"fld_change_description_7", String, default=__defaults__["change_description_7"]
)
change_description_8 = Column(
"fld_change_description_8", String, default=__defaults__["change_description_8"]
)
change_description_9 = Column(
"fld_change_description_9", String, default=__defaults__["change_description_9"]
)
change_description_10 = Column(
"fld_change_description_10",
String,
default=__defaults__["change_description_10"],
)
change_factor_1 = Column(
"fld_change_factor_1", Float, default=__defaults__["change_factor_1"]
)
change_factor_2 = Column(
"fld_change_factor_2", Float, default=__defaults__["change_factor_2"]
)
change_factor_3 = Column(
"fld_change_factor_3", Float, default=__defaults__["change_factor_3"]
)
change_factor_4 = Column(
"fld_change_factor_4", Float, default=__defaults__["change_factor_4"]
)
change_factor_5 = Column(
"fld_change_factor_5", Float, default=__defaults__["change_factor_5"]
)
change_factor_6 = Column(
"fld_change_factor_6", Float, default=__defaults__["change_factor_6"]
)
change_factor_7 = Column(
"fld_change_factor_7", Float, default=__defaults__["change_factor_7"]
)
change_factor_8 = Column(
"fld_change_factor_8", Float, default=__defaults__["change_factor_8"]
)
change_factor_9 = Column(
"fld_change_factor_9", Float, default=__defaults__["change_factor_9"]
)
change_factor_10 = Column(
"fld_change_factor_10", Float, default=__defaults__["change_factor_10"]
)
environment_from_id = Column(
"fld_environment_from_id", Integer, default=__defaults__["environment_from_id"]
)
environment_to_id = Column(
"fld_environment_to_id", Integer, default=__defaults__["environment_to_id"]
)
function_1 = Column(
"fld_function_1", String(128), default=__defaults__["function_1"]
)
function_2 = Column(
"fld_function_2", String(128), default=__defaults__["function_2"]
)
function_3 = Column(
"fld_function_3", String(128), default=__defaults__["function_3"]
)
function_4 = Column(
"fld_function_4", String(128), default=__defaults__["function_4"]
)
function_5 = Column(
"fld_function_5", String(128), default=__defaults__["function_5"]
)
similar_item_method_id = Column(
"fld_similar_item_method_id",
Integer,
default=__defaults__["similar_item_method_id"],
)
parent_id = Column("fld_parent_id", Integer, default=__defaults__["parent_id"])
quality_from_id = Column(
"fld_quality_from_id", Integer, default=__defaults__["quality_from_id"]
)
quality_to_id = Column(
"fld_quality_to_id", Integer, default=__defaults__["quality_to_id"]
)
result_1 = Column("fld_result_1", Float, default=__defaults__["result_1"])
result_2 = Column("fld_result_2", Float, default=__defaults__["result_2"])
result_3 = Column("fld_result_3", Float, default=__defaults__["result_3"])
result_4 = Column("fld_result_4", Float, default=__defaults__["result_4"])
result_5 = Column("fld_result_5", Float, default=__defaults__["result_5"])
temperature_from = Column(
"fld_temperature_from", Float, default=__defaults__["temperature_from"]
)
temperature_to = Column(
"fld_temperature_to", Float, default=__defaults__["temperature_to"]
)
user_blob_1 = Column("fld_user_blob_1", String, default=__defaults__["user_blob_1"])
user_blob_2 = Column("fld_user_blob_2", String, default=__defaults__["user_blob_2"])
user_blob_3 = Column("fld_user_blob_3", String, default=__defaults__["user_blob_3"])
user_blob_4 = Column("fld_user_blob_4", String, default=__defaults__["user_blob_4"])
user_blob_5 = Column("fld_user_blob_5", String, default=__defaults__["user_blob_5"])
user_float_1 = Column(
"fld_user_float_1", Float, default=__defaults__["user_float_1"]
)
user_float_2 = Column(
"fld_user_float_2", Float, default=__defaults__["user_float_2"]
)
user_float_3 = Column(
"fld_user_float_3", Float, default=__defaults__["user_float_3"]
)
user_float_4 = Column(
"fld_user_float_4", Float, default=__defaults__["user_float_4"]
)
user_float_5 = Column(
"fld_user_float_5", Float, default=__defaults__["user_float_5"]
)
user_int_1 = Column("fld_user_int_1", Integer, default=__defaults__["user_int_1"])
user_int_2 = Column("fld_user_int_2", Integer, default=__defaults__["user_int_2"])
user_int_3 = Column("fld_user_int_3", Integer, default=__defaults__["user_int_3"])
user_int_4 = Column("fld_user_int_4", Integer, default=__defaults__["user_int_4"])
user_int_5 = Column("fld_user_int_5", Integer, default=__defaults__["user_int_5"])
# Define the relationships to other tables in the RAMSTK Program database.
def get_attributes(self):
"""Retrieve current values of RAMSTKSimilarItem data model attributes.
:return: {hardware_id, change_description_1, change_description_2,
change_description_3, change_description_4,
change_description_5, change_description_6,
change_description_7, change_description_8,
change_description_9, change_description_10, change_factor_1,
change_factor_2, change_factor_3, change_factor_4,
change_factor_5, change_factor_6, change_factor_7,
change_factor_8, change_factor_9, change_factor_10,
environment_from_id, environment_to_id, function_1,
function_2, function_3, function_4, function_5,
similar_item_method_id, parent_id, quality_from_id,
quality_to_id, result_1, result_2, result_3, result_4,
result_5, temperature_from, temperature_to, user_blob_1,
user_blob_2, user_blob_3, user_blob_4, user_blob_5,
user_float_1, user_float_2, user_float_3, user_float_4,
user_float_5, user_int_1, user_int_2, user_int_3, user_int_4,
user_int_5}
        :rtype: dict
"""
_attributes = {
"hardware_id": self.hardware_id,
"change_description_1": self.change_description_1,
"change_description_2": self.change_description_2,
"change_description_3": self.change_description_3,
"change_description_4": self.change_description_4,
"change_description_5": self.change_description_5,
"change_description_6": self.change_description_6,
"change_description_7": self.change_description_7,
"change_description_8": self.change_description_8,
"change_description_9": self.change_description_9,
"change_description_10": self.change_description_10,
"change_factor_1": self.change_factor_1,
"change_factor_2": self.change_factor_2,
"change_factor_3": self.change_factor_3,
"change_factor_4": self.change_factor_4,
"change_factor_5": self.change_factor_5,
"change_factor_6": self.change_factor_6,
"change_factor_7": self.change_factor_7,
"change_factor_8": self.change_factor_8,
"change_factor_9": self.change_factor_9,
"change_factor_10": self.change_factor_10,
"environment_from_id": self.environment_from_id,
"environment_to_id": self.environment_to_id,
"function_1": self.function_1,
"function_2": self.function_2,
"function_3": self.function_3,
"function_4": self.function_4,
"function_5": self.function_5,
"similar_item_method_id": self.similar_item_method_id,
"parent_id": self.parent_id,
"quality_from_id": self.quality_from_id,
"quality_to_id": self.quality_to_id,
"result_1": self.result_1,
"result_2": self.result_2,
"result_3": self.result_3,
"result_4": self.result_4,
"result_5": self.result_5,
"temperature_from": self.temperature_from,
"temperature_to": self.temperature_to,
"user_blob_1": self.user_blob_1,
"user_blob_2": self.user_blob_2,
"user_blob_3": self.user_blob_3,
"user_blob_4": self.user_blob_4,
"user_blob_5": self.user_blob_5,
"user_float_1": self.user_float_1,
"user_float_2": self.user_float_2,
"user_float_3": self.user_float_3,
"user_float_4": self.user_float_4,
"user_float_5": self.user_float_5,
"user_int_1": self.user_int_1,
"user_int_2": self.user_int_2,
"user_int_3": self.user_int_3,
"user_int_4": self.user_int_4,
"user_int_5": self.user_int_5,
}
return _attributes
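# Illustrative sketch (not part of the RAMSTK API): the dict returned by get_attributes()
# covers every column that has a default declared in __defaults__ above.
def _attributes_cover_defaults(record: "RAMSTKSimilarItemRecord") -> bool:
    return set(record.__defaults__) <= set(record.get_attributes())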
| 40.875
| 88
| 0.656159
|
14a6d4a1cf52f24f6204f7b12efdec6db1bd3161
| 4,278
|
py
|
Python
|
ml/tf/ref.py
|
m-ahmadi/exref
|
1f76ea029995d2f60f19443b29c04c7628125ce3
|
[
"MIT"
] | 9
|
2019-08-28T16:06:21.000Z
|
2022-01-31T10:36:08.000Z
|
ml/tf/ref.py
|
m-ahmadi/exref
|
1f76ea029995d2f60f19443b29c04c7628125ce3
|
[
"MIT"
] | 1
|
2022-02-23T05:50:57.000Z
|
2022-02-25T16:56:02.000Z
|
ml/tf/ref.py
|
m-ahmadi/exref
|
1f76ea029995d2f60f19443b29c04c7628125ce3
|
[
"MIT"
] | 5
|
2019-08-28T16:06:23.000Z
|
2022-02-19T20:24:41.000Z
|
import tensorflow as tf
model = tf.keras.Sequential(layers=None|[], name=None)
model.compile(optimizer='rmsprop', loss=None|fn|''|Loss, metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs)
model.compile(optimizer='sgd', loss='mse')
model.fit(
x=None | arr<numpy> | list< arr<numpy> > | {'input':[]|Tensor} | tf.data | Sequence | DatasetCreator | ParameterServerStrategy,
y=None | ...,
batch_size=None, epochs=1, verbose=1|0|2|'auto',
callbacks=None, validation_split=0.0, validation_data=None, shuffle=True,
class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None,
validation_steps=None, validation_batch_size=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False
)
model.predict(x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)
model.save(filepath='', overwrite=True, include_optimizer=True, save_format=None, signatures=None, options=None, save_traces=True)
json_string = model.to_json(**kwargs)
model.summary()
tf.keras.models.model_from_json(json_string='', custom_objects=None)
tf.keras.models.load_model(filepath='', custom_objects=None, compile=True, options=None)
tf.keras.models.save_model(model, filepath, overwrite=True, include_optimizer=True, save_format=None, signatures=None, options=None, save_traces=True)
tf.saved_model.save(obj=tf.Module|tf.train.Checkpoint, export_dir='', signatures=None, options=None)
tf.keras.layers.Dense(
units=positive_integer, activation=None, use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros', kernel_regularizer=None,
bias_regularizer=None, activity_regularizer=None, kernel_constraint=None,
bias_constraint=None, **kwargs
)
tf.keras.layers.Dense(8, input_shape=(16,)) # kwarg `input_shape` implicitly creates an input layer to insert before the current layer (same as explicitly defining `InputLayer`)
tf.keras.layers.Flatten(data_format=None, **kwargs)
tf.keras.layers.InputLayer(
input_shape=(int,..)|TensorShape, batch_size=None, dtype=None, ?input_tensor=None, sparse=False,
?name='', ragged=False, type_spec=None, **kwargs
)
tf.keras.losses.
BinaryCrossentropy(from_logits=False, label_smoothing=0, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name='binary_crossentropy')
CategoricalCrossentropy(from_logits=False, label_smoothing=0, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name='categorical_crossentropy')
CategoricalHinge(reduction=losses_utils.ReductionV2.AUTO, name='categorical_hinge')
CosineSimilarity(axis=-1, reduction=losses_utils.ReductionV2.AUTO, name='cosine_similarity')
Hinge(reduction=losses_utils.ReductionV2.AUTO, name='hinge')
Huber(delta=1.0, reduction=losses_utils.ReductionV2.AUTO, name='huber_loss')
KLDivergence(reduction=losses_utils.ReductionV2.AUTO, name='kl_divergence')
LogCosh(reduction=losses_utils.ReductionV2.AUTO, name='log_cosh')
Loss(reduction=losses_utils.ReductionV2.AUTO, name=None)
MeanAbsoluteError(reduction=losses_utils.ReductionV2.AUTO, name='mean_absolute_error')
MeanAbsolutePercentageError(reduction=losses_utils.ReductionV2.AUTO, name='mean_absolute_percentage_error')
MeanSquaredError(reduction=losses_utils.ReductionV2.AUTO, name='mean_squared_error')
MeanSquaredLogarithmicError(reduction=losses_utils.ReductionV2.AUTO, name='mean_squared_logarithmic_error')
Poisson(reduction=losses_utils.ReductionV2.AUTO, name='poisson')
SparseCategoricalCrossentropy(from_logits=False, reduction=losses_utils.ReductionV2.AUTO, name='sparse_categorical_crossentropy')
SquaredHinge(reduction=losses_utils.ReductionV2.AUTO, name='squared_hinge')
tf.function(
func=None, input_signature=None, autograph=True, jit_compile=None,
experimental_implements=None, experimental_autograph_options=None,
experimental_relax_shapes=False, experimental_follow_type_hints=None
) -> tf.types.experimental.GenericFunction
tf.int32 is tf.dtypes.int32 # True
tf.TensorSpec(shape=TensorShape, dtype=tf.float32, name=None)
tf.TensorShape(dims=[int,..]|None)
tf.constant(value=num|[], dtype=None|''|tf.float32..., shape=None|(int,..), name='Const')
tf.zeros(shape=list<int> | tuple<int> | Tensor1D<int32>, dtype=tf.float32, ?name=None|'')
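# Minimal end-to-end sketch tying the reference lines above together (illustrative;
# layer sizes, data shapes and epoch count are arbitrary placeholders):
import numpy as np
example_model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(16,)),
    tf.keras.layers.Dense(8, activation='relu'),
    tf.keras.layers.Dense(1),
])
example_model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())
example_model.fit(np.zeros((32, 16)), np.zeros((32, 1)), epochs=1, verbose=0)
example_model.predict(np.zeros((4, 16)), verbose=0)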
| 55.558442
| 175
| 0.800608
|
41523fb27aa5c11e11189d2a1361b39bb4aee5ba
| 134
|
py
|
Python
|
dnnv/verifiers/common/reductions/iopolytope/errors.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 5
|
2022-01-28T20:30:34.000Z
|
2022-03-17T09:26:52.000Z
|
dnnv/verifiers/common/reductions/iopolytope/errors.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 9
|
2022-01-27T03:50:28.000Z
|
2022-02-08T18:42:17.000Z
|
dnnv/verifiers/common/reductions/iopolytope/errors.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 2
|
2022-02-03T17:32:43.000Z
|
2022-03-24T16:38:49.000Z
|
from ..base import ReductionError
class IOPolytopeReductionError(ReductionError):
pass
__all__ = ["IOPolytopeReductionError"]
| 14.888889
| 47
| 0.791045
|
b85db88727362309a261875f490fd25aa98c4e76
| 93
|
py
|
Python
|
Basic Programming/Function and array/digitfrequency.py
|
therohitsingh/Top300DSACode
|
e96b2ff833677d73ad197afcb39146969010315a
|
[
"MIT"
] | null | null | null |
Basic Programming/Function and array/digitfrequency.py
|
therohitsingh/Top300DSACode
|
e96b2ff833677d73ad197afcb39146969010315a
|
[
"MIT"
] | null | null | null |
Basic Programming/Function and array/digitfrequency.py
|
therohitsingh/Top300DSACode
|
e96b2ff833677d73ad197afcb39146969010315a
|
[
"MIT"
] | null | null | null |
# Count how many times the digit k occurs in the number n (both read as strings).
n = input()
k = input()
count = 0
for i in n:
    if k == i:
        count += 1
print(count)
| 13.285714
| 16
| 0.483871
|
386d6ed0a3833704c6dc8078535bc19f9d4ee78e
| 9,945
|
py
|
Python
|
tests/p2p/test_forkid.py
|
AndreMiras/trinity
|
6c20e2b63a698d345c282db8ab0cd426f4329ff5
|
[
"MIT"
] | null | null | null |
tests/p2p/test_forkid.py
|
AndreMiras/trinity
|
6c20e2b63a698d345c282db8ab0cd426f4329ff5
|
[
"MIT"
] | null | null | null |
tests/p2p/test_forkid.py
|
AndreMiras/trinity
|
6c20e2b63a698d345c282db8ab0cd426f4329ff5
|
[
"MIT"
] | null | null | null |
import sys
import pytest
import rlp
from eth_utils import to_bytes
from eth.chains.mainnet import MAINNET_VM_CONFIGURATION
from eth.chains.ropsten import ROPSTEN_VM_CONFIGURATION
from p2p.exceptions import RemoteChainIsStale, LocalChainIncompatibleOrStale
from p2p.forkid import ForkID, make_forkid, validate_forkid
MAINNET_GENESIS_HASH = to_bytes(
hexstr='0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3')
ROPSTEN_GENESIS_HASH = to_bytes(
hexstr='0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d')
@pytest.mark.parametrize(
'head,expected_forkid',
[
(0, ForkID(hash=to_bytes(hexstr='0xfc64ec04'), next=1150000)), # Unsynced
(1149999, ForkID(hash=to_bytes(hexstr='0xfc64ec04'), next=1150000)), # Last Frontier
(1150000, ForkID(hash=to_bytes(hexstr='0x97c2c34c'), next=1920000)), # First Homestead
(1919999, ForkID(hash=to_bytes(hexstr='0x97c2c34c'), next=1920000)), # Last Homestead
(1920000, ForkID(hash=to_bytes(hexstr='0x91d1f948'), next=2463000)), # First DAO block
(2462999, ForkID(hash=to_bytes(hexstr='0x91d1f948'), next=2463000)), # Last DAO block
(2463000, ForkID(hash=to_bytes(hexstr='0x7a64da13'), next=2675000)), # First Tangerine
(2674999, ForkID(hash=to_bytes(hexstr='0x7a64da13'), next=2675000)), # Last Tangerine
(2675000, ForkID(hash=to_bytes(hexstr='0x3edd5b10'), next=4370000)), # First Spurious
(4369999, ForkID(hash=to_bytes(hexstr='0x3edd5b10'), next=4370000)), # Last Spurious
(4370000, ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=7280000)), # First Byzantium
(7279999, ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=7280000)), # Last Byzantium
# First and last Constantinople, first Petersburg block
(7280000, ForkID(hash=to_bytes(hexstr='0x668db0af'), next=9069000)),
(9068999, ForkID(hash=to_bytes(hexstr='0x668db0af'), next=9069000)), # Last Petersburg
# First Istanbul and first Muir Glacier block
(9069000, ForkID(hash=to_bytes(hexstr='0x879d6e30'), next=9200000)),
# Last Istanbul and first Muir Glacier block
(9199999, ForkID(hash=to_bytes(hexstr='0x879d6e30'), next=9200000)),
(9200000, ForkID(hash=to_bytes(hexstr='0xe029e991'), next=0)), # First Muir Glacier block
(10000000, ForkID(hash=to_bytes(hexstr='0xe029e991'), next=0)), # Future Muir Glacier block
]
)
def test_mainnet_forkids(head, expected_forkid):
_test_make_forkid(MAINNET_VM_CONFIGURATION, MAINNET_GENESIS_HASH, head, expected_forkid)
@pytest.mark.parametrize(
'head,expected_forkid',
[
# Unsynced, last Frontier, Homestead and first Tangerine block
(0, ForkID(hash=to_bytes(hexstr='0x30c7ddbc'), next=10)),
(9, ForkID(hash=to_bytes(hexstr='0x30c7ddbc'), next=10)), # Last Tangerine block
(10, ForkID(hash=to_bytes(hexstr='0x63760190'), next=1700000)), # First Spurious block
(1699999, ForkID(hash=to_bytes(hexstr='0x63760190'), next=1700000)), # Last Spurious block
(1700000, ForkID(hash=to_bytes(hexstr='0x3ea159c7'), next=4230000)), # First Byzantium
(4229999, ForkID(hash=to_bytes(hexstr='0x3ea159c7'), next=4230000)), # Last Byzantium
(4230000, ForkID(hash=to_bytes(hexstr='0x97b544f3'), next=4939394)), # First Constantinople
(4939393, ForkID(hash=to_bytes(hexstr='0x97b544f3'), next=4939394)), # Last Constantinople
(4939394, ForkID(hash=to_bytes(hexstr='0xd6e2149b'), next=6485846)), # First Petersburg
(6485845, ForkID(hash=to_bytes(hexstr='0xd6e2149b'), next=6485846)), # Last Petersburg
(6485846, ForkID(hash=to_bytes(hexstr='0x4bc66396'), next=7117117)), # First Istanbul
(7117116, ForkID(hash=to_bytes(hexstr='0x4bc66396'), next=7117117)), # Last Istanbul block
(7117117, ForkID(hash=to_bytes(hexstr='0x6727ef90'), next=0)), # First Muir Glacier block
(7500000, ForkID(hash=to_bytes(hexstr='0x6727ef90'), next=0)), # Future
]
)
def test_ropsten_forkids(head, expected_forkid):
_test_make_forkid(ROPSTEN_VM_CONFIGURATION, ROPSTEN_GENESIS_HASH, head, expected_forkid)
def _test_make_forkid(vm_config, genesis_hash, head, expected_forkid):
forkid = make_forkid(genesis_hash, head, vm_config)
assert forkid.hash == expected_forkid.hash
assert forkid.next == expected_forkid.next
def test_forkid():
forkid = ForkID(hash=b'\xe0)\xe9\x91', next=999)
assert forkid.hash == b'\xe0)\xe9\x91'
assert forkid.next == 999
    # A hash with length different than 4 is not allowed.
with pytest.raises(ValueError):
forkid = ForkID(hash=b'\x00\x00\x00\x02Q\xc0', next=0)
with pytest.raises(ValueError):
forkid = ForkID(hash=b'\x02Q\xc0', next=0)
@pytest.mark.parametrize(
'local_head,remote_forkid,expected_error',
[
# Local is mainnet Petersburg, remote announces the same. No future fork is announced.
(7987396, ForkID(hash=to_bytes(hexstr='0x668db0af'), next=0), None),
# Local is mainnet Petersburg, remote announces the same. Remote also announces a next fork
# at block 0xffffffff, but that is uncertain.
(7987396, ForkID(hash=to_bytes(hexstr='0x668db0af'), next=sys.maxsize), None),
# Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
# announces also Byzantium, but it's not yet aware of Petersburg (e.g. non updated node
# before the fork). In this case we don't know if Petersburg passed yet or not.
(7279999, ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=0), None),
# Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
# announces also Byzantium, and it's also aware of Petersburg (e.g. updated node before
# the fork). We don't know if Petersburg passed yet (will pass) or not.
(7279999, ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=7280000), None),
# Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
# announces also Byzantium, and it's also aware of some random fork (e.g. misconfigured
# Petersburg). As neither forks passed at neither nodes, they may mismatch, but we still
# connect for now.
(7279999, ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=sys.maxsize), None),
# Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg.
# Remote is simply out of sync, accept.
(7987396, ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=7280000), None),
# Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium.
# Remote is definitely out of sync. It may or may not need the Petersburg update, we don't
# know yet.
(7987396, ForkID(hash=to_bytes(hexstr='0x3edd5b10'), next=4370000), None),
# Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
(7279999, ForkID(hash=to_bytes(hexstr='0x668db0af'), next=0), None),
# Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg.
# Local out of sync. Local also knows about a future fork, but that is uncertain yet.
(4369999, ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=0), None),
# Local is mainnet Petersburg. remote announces Byzantium but is not aware of further forks.
# Remote needs software update.
(7987396, ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=0), RemoteChainIsStale),
# Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
# 0xffffffff. Local needs software update, reject.
(7987396,
ForkID(hash=to_bytes(hexstr='0x5cddc0e1'), next=0),
LocalChainIncompatibleOrStale),
# Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
# 0xffffffff. Local needs software update, reject.
(7279999,
ForkID(hash=to_bytes(hexstr='0x5cddc0e1'), next=0),
LocalChainIncompatibleOrStale),
# Local is mainnet Petersburg, remote is Rinkeby Petersburg.
(7987396,
ForkID(hash=to_bytes(hexstr='0xafec6b27'), next=0),
LocalChainIncompatibleOrStale),
# Local is mainnet Muir Glacier, far in the future. Remote announces Gopherium (non
# existing fork) at some future block 88888888, for itself, but past block for local.
# Local is incompatible.
#
# This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
(88888888,
ForkID(hash=to_bytes(hexstr='0xe029e991'), next=88888888),
LocalChainIncompatibleOrStale),
# Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non
# existing fork) at block 7279999, before Petersburg. Local is incompatible.
(7279999,
ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=7279999),
LocalChainIncompatibleOrStale),
]
)
def test_forkid_validation(local_head, remote_forkid, expected_error):
if expected_error:
with pytest.raises(expected_error):
validate_forkid(
remote_forkid, MAINNET_GENESIS_HASH, local_head, MAINNET_VM_CONFIGURATION)
else:
validate_forkid(remote_forkid, MAINNET_GENESIS_HASH, local_head, MAINNET_VM_CONFIGURATION)
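# Hypothetical usage sketch (not part of the original module): calling the validate_forkid helper
# directly.  The signature and the expected exception are taken from the table above; nothing else
# is assumed.
def _forkid_validation_sketch():
    remote = ForkID(hash=to_bytes(hexstr='0xa00bc324'), next=0)
    try:
        # Local mainnet head past Petersburg, remote still announcing Byzantium with no next fork.
        validate_forkid(remote, MAINNET_GENESIS_HASH, 7987396, MAINNET_VM_CONFIGURATION)
    except RemoteChainIsStale:
        return 'remote needs a software update'
    return 'fork IDs are compatible'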
@pytest.mark.parametrize(
'forkid,expected_rlp',
[
(ForkID(hash=to_bytes(hexstr='0x00000000'), next=0), to_bytes(hexstr='0xc6840000000080')),
(ForkID(hash=to_bytes(hexstr='0xdeadbeef'), next=int(0xBADDCAFE)),
to_bytes(hexstr='0xca84deadbeef84baddcafe')),
]
)
def test_rlp_encoding(forkid, expected_rlp):
assert rlp.encode(forkid) == expected_rlp
assert rlp.decode(expected_rlp, sedes=ForkID) == forkid
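# Hypothetical sketch (not part of the original module): how an EIP-2124 fork hash such as the
# 0xa00bc324 / 0x668db0af values above could be derived.  The EIP defines FORK_HASH as a CRC32
# checksum of the genesis hash followed by each passed fork block number, unique and in ascending
# order, encoded as an 8-byte big-endian integer.
import zlib
def compute_fork_hash(genesis_hash, passed_fork_blocks):
    checksum = zlib.crc32(genesis_hash)
    for block_number in sorted(set(passed_fork_blocks)):
        checksum = zlib.crc32(block_number.to_bytes(8, 'big'), checksum)
    return checksum.to_bytes(4, 'big')
# With an empty fork list this should give the Frontier fork hash for the supplied genesis; feeding
# in the mainnet fork blocks up to 7280000 should reproduce the Petersburg hash 0x668db0af used in
# the cases above.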
| 51.796875
| 100
| 0.698441
|
82e6f3a4a6f5e0a8f08f7aad0299819ddf983a3c
| 2,162
|
py
|
Python
|
tests/tasks/test_aws_athena_cleaner_task.py
|
jezd-axyl/platsec-aws-scanner
|
bc2b064c87ac2f77fab49c1e1eb3782d6de685b2
|
[
"Apache-2.0"
] | null | null | null |
tests/tasks/test_aws_athena_cleaner_task.py
|
jezd-axyl/platsec-aws-scanner
|
bc2b064c87ac2f77fab49c1e1eb3782d6de685b2
|
[
"Apache-2.0"
] | 4
|
2021-05-06T12:36:46.000Z
|
2022-02-11T09:47:57.000Z
|
tests/tasks/test_aws_athena_cleaner_task.py
|
jezd-axyl/platsec-aws-scanner
|
bc2b064c87ac2f77fab49c1e1eb3782d6de685b2
|
[
"Apache-2.0"
] | 2
|
2021-04-21T04:48:47.000Z
|
2022-01-14T04:29:17.000Z
|
from unittest import TestCase
from unittest.mock import Mock, call
from src.tasks.aws_athena_cleaner_task import AwsAthenaCleanerTask
from tests.test_types_generator import account, task_report
class TestAwsAthenaCleanerTask(TestCase):
database_mappings = {
"db_1": ["table_1", "table_2", "table_3"],
"some_prefix_db_2": ["table_1", "table_2"],
"db_3": ["table_1"],
"some_prefix_db_4": ["table_1", "table_2", "table_3"],
"some_prefix_db_5": [],
}
expected_report = task_report(
account=account("555666777888", "athena"),
description="clean scanner leftovers",
partition=None,
results={
"dropped_tables": [
"some_prefix_db_2.table_1",
"some_prefix_db_2.table_2",
"some_prefix_db_4.table_1",
"some_prefix_db_4.table_2",
"some_prefix_db_4.table_3",
],
"dropped_databases": ["some_prefix_db_2", "some_prefix_db_4", "some_prefix_db_5"],
},
)
def test_clean_task_databases(self) -> None:
mock_athena = Mock(
list_databases=Mock(return_value=list(self.database_mappings.keys())),
list_tables=Mock(side_effect=lambda db: self.database_mappings.get(db)),
)
self.assertEqual(self.expected_report, AwsAthenaCleanerTask().run(mock_athena))
mock_athena.assert_has_calls(
[
call.list_databases(),
call.list_tables("some_prefix_db_2"),
call.drop_table("some_prefix_db_2", "table_1"),
call.drop_table("some_prefix_db_2", "table_2"),
call.list_tables("some_prefix_db_4"),
call.drop_table("some_prefix_db_4", "table_1"),
call.drop_table("some_prefix_db_4", "table_2"),
call.drop_table("some_prefix_db_4", "table_3"),
call.list_tables("some_prefix_db_5"),
call.drop_database("some_prefix_db_2"),
call.drop_database("some_prefix_db_4"),
call.drop_database("some_prefix_db_5"),
]
)
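# Hypothetical sketch (not part of this test module): the shape of run() that the call-order
# assertions above imply -- list databases, keep only the scanner-prefixed ones, drop their tables,
# then drop the databases themselves.  The "some_prefix_" value comes from the fixtures here; the
# real task presumably filters on its own scanner prefix.
def _cleaner_run_sketch(athena, prefix="some_prefix_"):
    dropped_tables, dropped_databases = [], []
    for database in athena.list_databases():
        if not database.startswith(prefix):
            continue
        for table in athena.list_tables(database):
            athena.drop_table(database, table)
            dropped_tables.append(f"{database}.{table}")
        dropped_databases.append(database)
    for database in dropped_databases:
        athena.drop_database(database)
    return {"dropped_tables": dropped_tables, "dropped_databases": dropped_databases}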
| 39.309091
| 94
| 0.604533
|