Dataset columns:

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Records (metadata, then the content field in full, then the per-file statistics):
hexsha: b1d9229c4403977c0473eb8589e173976ce4e166 | size: 1,779 | ext: py | lang: Python
max_stars: decrypt.py @ AkshayJainG/ContinuousIntrusion (head c12f8ce113b1141d3d4647f2de20f943906973e8) | licenses: ["MIT"] | stars: 120 | events: 2015-11-30T16:27:31.000Z to 2021-08-18T13:38:42.000Z
max_issues: decrypt.py @ AkshayJainG/ContinuousIntrusion (head c12f8ce113b1141d3d4647f2de20f943906973e8) | licenses: ["MIT"] | issues: null | events: null
max_forks: decrypt.py @ AkshayJainG/ContinuousIntrusion (head c12f8ce113b1141d3d4647f2de20f943906973e8) | licenses: ["MIT"] | forks: 38 | events: 2015-11-14T02:22:23.000Z to 2020-09-02T04:51:08.000Z
content:
#!/usr/bin/env python
import re
import sys
import base64
from hashlib import sha256
from binascii import hexlify, unhexlify
from Crypto.Cipher import AES
MAGIC = "::::MAGIC::::"
def usage():
print "./decrypt.py <master.key> <hudson.util.Secret> <credentials.xml>"
sys.exit(0)
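# Overview of the steps performed in main() below (an informal description of the
# legacy Jenkins secret layout this script targets, not authoritative Jenkins docs):
#   1. master.key is hashed with SHA-256 and truncated to 16 bytes.
#   2. That key decrypts hudson.util.Secret with AES-128 in ECB mode; the plaintext
#      must contain the ::::MAGIC:::: marker to confirm the key was correct.
#   3. The first 16 bytes of that plaintext become the key used to decrypt the
#      base64-encoded <password>, <passphrase>, <privateKey> and <bindPassword>
#      values found in credentials.xml.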
def main():
if len(sys.argv) != 4:
usage()
master_key = open(sys.argv[1]).read()
hudson_secret_key = open(sys.argv[2], 'rb').read()
hashed_master_key = sha256(master_key).digest()[:16]
o = AES.new(hashed_master_key, AES.MODE_ECB)
x = o.decrypt(hudson_secret_key)
assert MAGIC in x
k = x[:-16]
k = k[:16]
credentials = open(sys.argv[3]).read()
passwords = re.findall(r'<password>(.*?)</password>', credentials)
for password in passwords:
p = base64.decodestring(password)
o = AES.new(k, AES.MODE_ECB)
x = o.decrypt(p)
assert MAGIC in x
print re.findall('(.*)' + MAGIC, x)[0]
passphrases = re.findall(r'<passphrase>(.*?)</passphrase>', credentials)
for passphrase in passphrases:
p = base64.decodestring(passphrase)
o = AES.new(k, AES.MODE_ECB)
x = o.decrypt(p)
assert MAGIC in x
print re.findall('(.*)' + MAGIC, x)[0]
privatekeys = re.findall(r'<privateKey>(.*?)</privateKey>', credentials)
for privatekey in privatekeys:
p = base64.decodestring(privatekey)
o = AES.new(k, AES.MODE_ECB)
x = o.decrypt(p)
print x
assert MAGIC in x
print re.findall('(.*)' + MAGIC, x)[0]
bindpasswords = re.findall(r'<bindPassword>(.*?)</bindPassword>', credentials)
for bindpassword in bindpasswords:
p = base64.decodestring(bindpassword)
o = AES.new(k, AES.MODE_ECB)
x = o.decrypt(p)
assert MAGIC in x
print re.findall('(.*)' + MAGIC, x)[0]
if __name__ == '__main__':
main()
avg_line_length: 26.161765 | max_line_length: 80 | alphanum_fraction: 0.648679

hexsha: f79e9b21718f90eb346ee457caf3cc4a8c2c683b | size: 2,289 | ext: py | lang: Python
max_stars: A1014280203/25/25.py @ saurabh896/python-1 (head f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7) | licenses: ["MIT"] | stars: 3,976 | events: 2015-01-01T15:49:39.000Z to 2022-03-31T03:47:56.000Z
max_issues: A1014280203/25/25.py @ oyesam7/python-1 (head 220734af09fa09a6f615d4f1b4612a0ab75d91d1) | licenses: ["MIT"] | issues: 97 | events: 2015-01-11T02:59:46.000Z to 2022-03-16T14:01:56.000Z
max_forks: A1014280203/25/25.py @ oyesam7/python-1 (head 220734af09fa09a6f615d4f1b4612a0ab75d91d1) | licenses: ["MIT"] | forks: 3,533 | events: 2015-01-01T06:19:30.000Z to 2022-03-28T13:14:54.000Z
content:
# made with the Baidu SDK
import requests
import pyaudio
import wave
import base64
import json
import win32api
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 8000
RECORD_SECONDS = 3
WAVE_OUTPUT_FILENAME = "out.wav"
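# Recording parameters used below: 3 seconds of mono, 16-bit audio sampled at 8 kHz,
# written to out.wav and then base64-encoded for the speech recognition request.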
def make_audio():
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print('*recording')
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print('*done recording')
stream.stop_stream()
stream.close()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
CC_URL = "https://openapi.baidu.com/oauth/2.0/token?" \
"grant_type=client_credentials&" \
"client_id=&" \
"client_secret=&"
TOKEN = ""
API = 'http://vop.baidu.com/server_api'
def get_token():
resp = requests.post(CC_URL)
print(resp.json())
def speech_to_text():
with open(WAVE_OUTPUT_FILENAME, 'rb') as file:
data = file.read()
params = {
"format": 'wav',
"rate": RATE,
"channel": CHANNELS,
"token": TOKEN,
"cuid": "",
"len": len(data),
"speech": base64.b64encode(data).decode(),
}
headers = {
'Content-Type': 'application/json;',
}
resp = requests.post(url=API, data=json.dumps(params), headers=headers)
# print(resp.json())
return resp.json()['result']
def make_action(texts):
maps = {
'百度': 'http://www.baidu.com',
'网易': 'http://www.163.com'
}
target = ''
for text in texts:
if text.find('百度') != -1:
target = '百度'
elif text.find('网易') != -1:
target = '网易'
if target:
win32api.ShellExecute(0, 'open', maps[target], '', '', 1)
else:
print('Match failed:', texts)
if __name__ == '__main__':
make_audio()
texts = speech_to_text()
make_action(texts)
avg_line_length: 24.094737 | max_line_length: 76 | alphanum_fraction: 0.549585

hexsha: 71088483f30887944da400e7d7f60ebc97c7a86c | size: 2,098 | ext: py | lang: Python
max_stars: lib/model/utils/blob.py @ galaxy-fangfang/deconv.pytorch (head 1415638d39c380d6d4567735965e87416b31f73b) | licenses: ["MIT"] | stars: null | events: null
max_issues: lib/model/utils/blob.py @ galaxy-fangfang/deconv.pytorch (head 1415638d39c380d6d4567735965e87416b31f73b) | licenses: ["MIT"] | issues: null | events: null
max_forks: lib/model/utils/blob.py @ galaxy-fangfang/deconv.pytorch (head 1415638d39c380d6d4567735965e87416b31f73b) | licenses: ["MIT"] | forks: null | events: null
content:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Blob helper functions."""
import numpy as np
# from scipy.misc import imread, imresize
import cv2
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def im_list_to_blob(ims):
"""Convert a list of images into a network input.
Assumes images are already prepared (means subtracted, BGR order, ...).
"""
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
for i in xrange(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
im -= pixel_means
# im = im[:, :, ::-1]
im_shape = im.shape
# im_size_min = np.min(im_shape[0:2])
# im_size_max = np.max(im_shape[0:2])
# Prevent the biggest axis from being more than MAX_SIZE
# if np.round(im_scale * im_size_max) > max_size:
# im_scale = float(max_size) / float(im_size_max)
# im = imresize(im, im_scale)
if(im_shape[0]>im_shape[1]):
im_scale1 = float(1024) / float(im_shape[0])
im_scale2 = float(642) / float(im_shape[1])
else:
im_scale1 = float(642) / float(im_shape[0])
im_scale2 = float(1024) / float(im_shape[1])
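# The two scale factors above force a fixed output resolution regardless of the
# input aspect ratio: the longer image side is resized to 1024 pixels and the
# shorter side to 642 (fy=im_scale1 scales the height, fx=im_scale2 the width).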
im = cv2.resize(im, None, None, fx=im_scale2, fy=im_scale1,
interpolation=cv2.INTER_LINEAR)
# import ipdb
# ipdb.set_trace()
#################
# Edited by fangfang
# 1. make sure the input image has the fixed size:
# so that the deconvolutioned image can be added to the previous feature map
return im, im_scale1,im_scale2
avg_line_length: 31.787879 | max_line_length: 83 | alphanum_fraction: 0.590562

hexsha: 7934877f1106921438d1ff14d5adae8d0ec9545e | size: 9,416 | ext: py | lang: Python
max_stars: sudkampPython/transitions/extensions/diagrams.py @ thundergolfer/sudkamp-langs-machines-python (head 04e07758464891dd9815a9578cf158638bab8f24) | licenses: ["MIT"] | stars: 8 | events: 2016-08-21T10:28:35.000Z to 2017-05-30T12:33:50.000Z
max_issues: sudkampPython/transitions/extensions/diagrams.py @ thundergolfer/sudkamp-langs-machines-python (head 04e07758464891dd9815a9578cf158638bab8f24) | licenses: ["MIT"] | issues: 1 | events: 2018-02-27T05:10:19.000Z to 2018-02-27T05:10:19.000Z
max_forks: sudkampPython/transitions/extensions/diagrams.py @ thundergolfer/sudkamp-langs-machines-python (head 04e07758464891dd9815a9578cf158638bab8f24) | licenses: ["MIT"] | forks: null | events: null
content:
import abc
from ..core import Machine
from ..core import Transition
from .nesting import NestedState
try:
import pygraphviz as pgv
except:
pgv = None
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Diagram(object):
def __init__(self, machine):
self.machine = machine
@abc.abstractmethod
def get_graph(self):
return
class AGraph(Diagram):
machine_attributes = {
'directed': True,
'strict': False,
'rankdir': 'LR',
'ratio': '0.3'
}
style_attributes = {
'node': {
'default': {
'shape': 'circle',
'height': '1.2',
'style': 'filled',
'fillcolor': 'white',
'color': 'black',
},
'active': {
'color': 'red',
'fillcolor': 'darksalmon',
'shape': 'doublecircle'
},
'previous': {
'color': 'blue',
'fillcolor': 'azure2',
}
},
'edge': {
'default': {
'color': 'black',
},
'previous': {
'color': 'blue',
}
}
}
def __init__(self, *args, **kwargs):
self.seen = []
super(AGraph, self).__init__(*args, **kwargs)
def _add_nodes(self, states, container):
# to be able to process children recursively as well as the state dict of a machine
states = states.values() if isinstance(states, dict) else states
for state in states:
if state.name in self.seen:
continue
elif hasattr(state, 'children') and len(state.children) > 0:
self.seen.append(state.name)
sub = container.add_subgraph(name="cluster_" + state._name, label=state.name, rank='same')
self._add_nodes(state.children, sub)
else:
try:
shape = self.style_attributes['node']['default']['shape']
except KeyError:
shape = 'circle'
self.seen.append(state.name)
container.add_node(n=state.name, shape=shape)
def _add_edges(self, events, container):
for event in events.values():
label = str(event.name)
for transitions in event.transitions.items():
src = self.machine.get_state(transitions[0])
ltail = ''
if hasattr(src, 'children') and len(src.children) > 0:
ltail = 'cluster_' + src.name
src = src.children[0]
while len(src.children) > 0:
src = src.children[0]
for t in transitions[1]:
dst = self.machine.get_state(t.dest)
edge_label = self._transition_label(label, t)
lhead = ''
if hasattr(dst, 'children') and len(dst.children) > 0:
lhead = 'cluster_' + dst.name
dst = dst.children[0]
while len(dst.children) > 0:
dst = dst.children[0]
# special case in which parent to first child edge is resolved to a self reference.
# will be omitted for now. I have not found a solution for how to fix this yet since having
# cluster to node edges is a bit messy with dot.
if dst.name == src.name and transitions[0] != t.dest:
continue
elif container.has_edge(src.name, dst.name):
edge = container.get_edge(src.name, dst.name)
edge.attr['label'] = edge.attr['label'] + ' | ' + edge_label
else:
container.add_edge(src.name, dst.name, label=edge_label, ltail=ltail, lhead=lhead)
def _transition_label(self, edge_label, tran):
if self.machine.show_conditions and tran.conditions:
return '{edge_label} [{conditions}]'.format(
edge_label=edge_label,
conditions=' & '.join(
c.func if c.target else '!' + c.func
for c in tran.conditions
),
)
return edge_label
def get_graph(self, title=None):
""" Generate a DOT graph with pygraphviz, returns an AGraph object
Args:
title (string): Optional title for the graph.
"""
if not pgv:
raise Exception('AGraph diagram requires pygraphviz')
if title is None:
title = self.__class__.__name__
elif title is False:
title = ''
fsm_graph = pgv.AGraph(label=title, compound=True, **self.machine_attributes)
fsm_graph.node_attr.update(self.style_attributes['node']['default'])
# For each state, draw a circle
self._add_nodes(self.machine.states, fsm_graph)
self._add_edges(self.machine.events, fsm_graph)
setattr(fsm_graph, 'style_attributes', self.style_attributes)
return fsm_graph
class GraphMachine(Machine):
_pickle_blacklist = ['graph']
def __getstate__(self):
return {k: v for k, v in self.__dict__.items() if k not in self._pickle_blacklist}
def __setstate__(self, state):
self.__dict__.update(state)
self.graph = self.get_graph(title=self.title)
self.set_node_style(self.graph.get_node(self.current_state.name), 'active')
def __init__(self, *args, **kwargs):
# remove graph config from keywords
self.title = kwargs.pop('title', 'State Machine')
self.show_conditions = kwargs.pop('show_conditions', False)
super(GraphMachine, self).__init__(*args, **kwargs)
# Create graph at beginning
self.graph = self.get_graph(title=self.title)
# Set initial node as active
self.set_node_state(self.initial, 'active')
def get_graph(self, title=None, force_new=False):
if title is None:
title = self.title
if not hasattr(self, 'graph') or force_new:
self.graph = AGraph(self).get_graph(title)
return self.graph
def set_edge_state(self, edge_from, edge_to, state='default'):
""" Mark a node as active by changing the attributes """
assert hasattr(self, 'graph')
edge = self.graph.get_edge(edge_from, edge_to)
# Reset all the edges
for e in self.graph.edges_iter():
self.set_edge_style(e, 'default')
try:
self.set_edge_style(edge, state)
except KeyError:
self.set_edge_style(edge, 'default')
def add_states(self, *args, **kwargs):
super(GraphMachine, self).add_states(*args, **kwargs)
self.graph = self.get_graph(force_new=True)
def add_transition(self, *args, **kwargs):
super(GraphMachine, self).add_transition(*args, **kwargs)
self.graph = self.get_graph(force_new=True)
def set_node_state(self, node_name=None, state='default', reset=False):
assert hasattr(self, 'graph')
if node_name is None:
node_name = self.state
if reset:
for n in self.graph.nodes_iter():
self.set_node_style(n, 'default')
if self.graph.has_node(node_name):
node = self.graph.get_node(node_name)
func = self.set_node_style
else:
path = node_name.split(NestedState.separator)
node = self.graph
while len(path) > 0:
node = node.get_subgraph('cluster_' + path.pop(0))
func = self.set_graph_style
try:
func(node, state)
except KeyError:
func(node, 'default')
def set_node_style(self, item, style='default'):
style_attr = self.graph.style_attributes.get('node', {}).get(style)
item.attr.update(style_attr)
def set_edge_style(self, item, style='default'):
style_attr = self.graph.style_attributes.get('edge', {}).get(style)
item.attr.update(style_attr)
def set_graph_style(self, item, style='default'):
style_attr = self.graph.style_attributes.get('node', {}).get(style)
item.graph_attr.update(style_attr)
@staticmethod
def _create_transition(*args, **kwargs):
return TransitionGraphSupport(*args, **kwargs)
class TransitionGraphSupport(Transition):
def _change_state(self, event_data):
# Mark the active node
dest = event_data.machine.get_state(self.dest)
event_data.machine.set_node_state(dest.name, state='active', reset=True)
# Mark the previous node and path used
if self.source is not None:
source = event_data.machine.get_state(self.source)
event_data.machine.set_node_state(source.name, state='previous')
if hasattr(source, 'children'):
while len(source.children) > 0:
source = source.children[0]
while len(dest.children) > 0:
dest = dest.children[0]
event_data.machine.set_edge_state(source.name, dest.name, state='previous')
super(TransitionGraphSupport, self)._change_state(event_data)
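# Hypothetical usage sketch (names and call signatures follow the upstream
# `transitions` GraphMachine API that this vendored module is based on; adjust the
# imports to this repository before relying on it):
#
#     machine = GraphMachine(states=['solid', 'liquid', 'gas'], initial='solid',
#                            title='Matter', show_conditions=True)
#     machine.add_transition('melt', 'solid', 'liquid')
#     machine.get_graph().draw('state_diagram.png', prog='dot')  # requires pygraphviz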
avg_line_length: 34.115942 | max_line_length: 111 | alphanum_fraction: 0.562341

hexsha: 9ddf936faa7ba621e7a0b6c328e794576714fc89 | size: 5,886 | ext: py | lang: Python
max_stars: inferencia/task/object_detection/object_detection_2d/model/model/yolo_v4/yolo_v4.py @ yuya-mochimaru-np/inferencia (head e09f298d0a80672fc5bb9383e23c941290eff334) | licenses: ["Apache-2.0"] | stars: null | events: null
max_issues: inferencia/task/object_detection/object_detection_2d/model/model/yolo_v4/yolo_v4.py @ yuya-mochimaru-np/inferencia (head e09f298d0a80672fc5bb9383e23c941290eff334) | licenses: ["Apache-2.0"] | issues: 5 | events: 2021-07-25T23:19:29.000Z to 2021-07-26T23:35:13.000Z
max_forks: inferencia/task/object_detection/object_detection_2d/model/model/yolo_v4/yolo_v4.py @ yuya-mochimaru-np/inferencia (head e09f298d0a80672fc5bb9383e23c941290eff334) | licenses: ["Apache-2.0"] | forks: 1 | events: 2021-09-18T12:06:13.000Z to 2021-09-18T12:06:13.000Z
content:
import os.path as osp
from typing import Union
import numpy as np
import onnxruntime
from .process import (pre_process,
post_processing,
validate_bbox)
from ...object_detection_2d_model import ObjectDetection2DModel
from ...object_detection_2d_result import ObjectDetection2DResult
from ....label.object_detection_2d_label_factory import ObjectDetection2DLabelFactory
from .......util.file.file import get_model_path, download_from_google_drive
from .......util.logger.logger import Logger
from inferencia.util.pre_process.validate import validate_image
class YoloV4(ObjectDetection2DModel):
task_major_name = "ObjectDetection"
task_minor_name = "ObjectDetection2D"
model_name = "YoloV4"
model_detail_name = None
input_width = None
input_height = None
weight_url = None
def __init__(self,
model_path,
model_precision,
conf_thresh,
nms_thresh,
label_name):
self.logger = Logger(__class__.__name__)
init_msg = "\n===================== \n Initialize {}-{}-{} \n=====================\n".format(self.task_minor_name,
self.model_name,
self.model_detail_name)
self.logger.info(init_msg)
self.conf_thresh = conf_thresh
self.nms_thresh = nms_thresh
model_path = self.get_model_path(model_path,
self.task_major_name,
self.task_minor_name,
self.model_name,
self.model_detail_name,
model_precision)
self.download_model(self.weight_url, model_path)
self.sess = self.get_inference_session(model_path)
# First inference is too slow. Should be done here.
dummy_image = self.get_dummy_image()
self.inference(dummy_image)
self.logger.info("Initial inference")
label = ObjectDetection2DLabelFactory.create(label_name)
self.label_dict = label.to_json()
def get_model_path(self,
model_path,
task_major_name,
task_minor_name,
model_name,
model_detail_name,
model_precision):
if model_path is None:
model_path = get_model_path(task_major_name,
task_minor_name,
model_name,
model_detail_name,
model_precision)
else:
pass
return model_path
def download_model(self, weight_url, model_path):
if not osp.exists(model_path):
download_from_google_drive(weight_url, model_path)
msg = "download weight from {weight_url} and save to {model_path}".format(weight_url=weight_url,
model_path=model_path)
self.logger.info(msg)
def get_inference_session(self, model_path):
return onnxruntime.InferenceSession(model_path)
def inference(self,
images: Union[np.ndarray, list]) -> list:
pre_proc_rets, image_sizes = self.pre_process(images)
fwd_rets = self.forward(pre_proc_rets)
post_proc_rets = self.post_process(fwd_rets, image_sizes)
return post_proc_rets
def pre_process(self, images):
images = validate_image(images)
pre_proc_rets, image_sizes = pre_process(images,
self.input_width,
self.input_height)
return pre_proc_rets, image_sizes
def forward(self, images):
output = self.sess.run(None, {self.sess.get_inputs()[0].name: images})
return output
def post_process(self, fwd_rets, image_sizes):
frames_boxes = post_processing(fwd_rets,
self.conf_thresh,
self.nms_thresh)
obj_det_rets = []
for frame_boxes, image_size in zip(frames_boxes, image_sizes):
for box in frame_boxes:
image_height, image_width, _ = image_size
xmin, ymin, xmax, ymax, confidence, _, class_id = box
xmin, ymin, xmax, ymax = validate_bbox(xmin,
ymin,
xmax,
ymax,
image_height,
image_width)
obj_det_ret = ObjectDetection2DResult(class_id,
self.label_dict[class_id],
xmin,
ymin,
xmax,
ymax,
confidence)
obj_det_rets.append(obj_det_ret)
return obj_det_rets
def get_dummy_image(self):
input_shape = self.sess.get_inputs()[0].shape
_, _, self.input_height, self.input_width = input_shape
dummy_image = np.zeros((self.input_height, self.input_width, 3),
dtype=np.uint8)
return dummy_image
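# Hypothetical usage sketch (YoloV4 is configured through class attributes such as
# weight_url and the input size, so a concrete, configured subclass is assumed here;
# the names below are illustrative and not part of this module):
#
#     model = ConfiguredYoloV4(model_path=None, model_precision="fp32",
#                              conf_thresh=0.4, nms_thresh=0.6, label_name="coco")
#     detections = model.inference(image)  # list of ObjectDetection2DResult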
avg_line_length: 42.963504 | max_line_length: 124 | alphanum_fraction: 0.497282

hexsha: b02b1165e6480c616876d118d7207e9535e5f4bc | size: 1,297 | ext: py | lang: Python
max_stars: app/proxy.py @ carlosMarioGonzalez/servidor (head 871ee95a57f83175316d65e70cef240f747380a5) | licenses: ["MIT"] | stars: null | events: null
max_issues: app/proxy.py @ carlosMarioGonzalez/servidor (head 871ee95a57f83175316d65e70cef240f747380a5) | licenses: ["MIT"] | issues: null | events: null
max_forks: app/proxy.py @ carlosMarioGonzalez/servidor (head 871ee95a57f83175316d65e70cef240f747380a5) | licenses: ["MIT"] | forks: null | events: null
content:
# example_consumer.py
import pika, os, csv
from datetime import datetime
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
# You can generate a Token from the "Tokens Tab" in the UI
token = "MlUoRzyipVkKqyGSjil7696heOcs8s4JDI_IVNWKqvZ5_eVAbaht16Fwwm46oJN0PRUQnu9-L7W0qhpgoAjNFA=="
org = "taller 3"
bucket = "dispositivo1"
client = InfluxDBClient(url="http://52.234.212.255:8086", token=token)
def process_function(msg):
mesage = msg.decode("utf-8")
print(mesage)
write_api = client.write_api(write_options=SYNCHRONOUS)
data = "mem,host=host1 used_percent=" + mesage
write_api.write(bucket, org, data)
return
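# The string written above ("mem,host=host1 used_percent=<value>") is InfluxDB line
# protocol: measurement "mem", tag host=host1, field used_percent set to the text
# received from the RabbitMQ 'mensajes' queue.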
while 1:
url = os.environ.get('CLOUDAMQP_URL', 'amqp://guest:guest@rabbit:5672/%2f')
params = pika.URLParameters(url)
connection = pika.BlockingConnection(params)
channel = connection.channel() # start a channel
channel.queue_declare(queue='mensajes') # Declare a queue
# create a function which is called on incoming messages
def callback(ch, method, properties, body):
process_function(body)
# set up subscription on the queue
channel.basic_consume('mensajes',
callback,
auto_ack=True)
# start consuming (blocks)
channel.start_consuming()
connection.close()
avg_line_length: 30.880952 | max_line_length: 98 | alphanum_fraction: 0.760216

hexsha: 38e850630e4cfe3a5f24315791c77ffeed270f2b | size: 93 | ext: py | lang: Python
max_stars: PeopleApp/apps.py @ kshitij1234/Chemisty-Department-Website (head 44848fe213aa47e8c02ca612f81c2b49a28b09d1) | licenses: ["MIT"] | stars: null | events: null
max_issues: PeopleApp/apps.py @ kshitij1234/Chemisty-Department-Website (head 44848fe213aa47e8c02ca612f81c2b49a28b09d1) | licenses: ["MIT"] | issues: null | events: null
max_forks: PeopleApp/apps.py @ kshitij1234/Chemisty-Department-Website (head 44848fe213aa47e8c02ca612f81c2b49a28b09d1) | licenses: ["MIT"] | forks: null | events: null
content:
from django.apps import AppConfig
class PeopleappConfig(AppConfig):
name = 'PeopleApp'
avg_line_length: 15.5 | max_line_length: 33 | alphanum_fraction: 0.763441

hexsha: ab25b3f55aff45b478aab95b4c4a8b7c92c46c97 | size: 578 | ext: py | lang: Python
max_stars: ecc/wsgi.py @ SocialGouv/ecollecte (head 1bfce2e0700b563c111c11452356b46ecb2630e4) | licenses: ["MIT"] | stars: 9 | events: 2018-11-28T07:36:37.000Z to 2022-02-04T12:56:11.000Z
max_issues: ecc/wsgi.py @ betagouv/e-controle (head b6f790ca2590ac257a47930a1e521b86ce3edb29) | licenses: ["MIT"] | issues: 154 | events: 2018-11-22T14:41:17.000Z to 2022-02-12T08:48:57.000Z
max_forks: ecc/wsgi.py @ betagouv/e-controle (head b6f790ca2590ac257a47930a1e521b86ce3edb29) | licenses: ["MIT"] | forks: 10 | events: 2018-11-13T06:57:10.000Z to 2022-03-21T13:04:49.000Z
content:
"""
WSGI config for ecc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
from dotenv import load_dotenv
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ecc.settings")
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_path = os.path.join(BASE_DIR, '.env')
load_dotenv(dotenv_path=env_path, override=True)
application = get_wsgi_application()
avg_line_length: 26.272727 | max_line_length: 78 | alphanum_fraction: 0.783737

hexsha: 10baf7eab1fdec4f200569729042a0744c2c5450 | size: 10,462 | ext: py | lang: Python
max_stars: awx/main/tests/unit/api/test_generics.py @ doziya/ansible (head 96f7371493043e2ae596d059f2ca990bd0a28ad5) | licenses: ["Apache-2.0"] | stars: 1 | events: 2021-06-11T20:01:06.000Z to 2021-06-11T20:01:06.000Z
max_issues: awx/main/tests/unit/api/test_generics.py @ doziya/ansible (head 96f7371493043e2ae596d059f2ca990bd0a28ad5) | licenses: ["Apache-2.0"] | issues: 4 | events: 2020-04-29T23:03:16.000Z to 2022-03-01T23:56:09.000Z
max_forks: awx/main/tests/unit/api/test_generics.py @ doziya/ansible (head 96f7371493043e2ae596d059f2ca990bd0a28ad5) | licenses: ["Apache-2.0"] | forks: 1 | events: 2018-06-06T08:47:22.000Z to 2018-06-06T08:47:22.000Z
content:
# Python
import pytest
import mock
# DRF
from rest_framework import status
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
# AWX
from awx.api.generics import (
ParentMixin,
SubListCreateAttachDetachAPIView, SubListAttachDetachAPIView,
DeleteLastUnattachLabelMixin,
ResourceAccessList,
ListAPIView
)
from awx.main.models import Organization, Credential
@pytest.fixture
def get_object_or_404(mocker):
# pytest patch without return_value generates a random value, we are counting on this
return mocker.patch('awx.api.generics.get_object_or_404')
@pytest.fixture
def get_object_or_400(mocker):
return mocker.patch('awx.api.generics.get_object_or_400')
@pytest.fixture
def mock_response_new(mocker):
m = mocker.patch('awx.api.generics.Response.__new__')
m.return_value = m
return m
@pytest.fixture
def mock_organization():
return Organization(pk=4, name="Unsaved Org")
@pytest.fixture
def parent_relationship_factory(mocker):
def rf(serializer_class, relationship_name, relationship_value=mocker.Mock()):
mock_parent_relationship = mocker.MagicMock(**{'%s.add.return_value' % relationship_name: relationship_value})
mocker.patch('awx.api.generics.ParentMixin.get_parent_object', return_value=mock_parent_relationship)
serializer = serializer_class()
[setattr(serializer, x, '') for x in ['relationship', 'model', 'parent_model']]
serializer.relationship = relationship_name
return (serializer, mock_parent_relationship)
return rf
# TODO: Test create and associate failure (i.e. id doesn't exist, record already exists, permission denied)
# TODO: Mock and check return (Response)
class TestSubListCreateAttachDetachAPIView:
def test_attach_validate_ok(self, mocker):
mock_request = mocker.MagicMock(data=dict(id=1))
serializer = SubListCreateAttachDetachAPIView()
(sub_id, res) = serializer.attach_validate(mock_request)
assert sub_id == 1
assert res is None
def test_attach_validate_invalid_type(self, mocker):
mock_request = mocker.MagicMock(data=dict(id='foobar'))
serializer = SubListCreateAttachDetachAPIView()
(sub_id, res) = serializer.attach_validate(mock_request)
assert type(res) is Response
def test_attach_create_and_associate(self, mocker, get_object_or_400, parent_relationship_factory, mock_response_new):
(serializer, mock_parent_relationship) = parent_relationship_factory(SubListCreateAttachDetachAPIView, 'wife')
create_return_value = mocker.MagicMock(status_code=status.HTTP_201_CREATED)
serializer.create = mocker.Mock(return_value=create_return_value)
mock_request = mocker.MagicMock(data=dict())
ret = serializer.attach(mock_request, None, None)
assert ret == mock_response_new
serializer.create.assert_called_with(mock_request, None, None)
mock_parent_relationship.wife.add.assert_called_with(get_object_or_400.return_value)
mock_response_new.assert_called_with(Response, create_return_value.data, status=status.HTTP_201_CREATED, headers={'Location': create_return_value['Location']})
def test_attach_associate_only(self, mocker, get_object_or_400, parent_relationship_factory, mock_response_new):
(serializer, mock_parent_relationship) = parent_relationship_factory(SubListCreateAttachDetachAPIView, 'wife')
serializer.create = mocker.Mock(return_value=mocker.MagicMock())
mock_request = mocker.MagicMock(data=dict(id=1))
ret = serializer.attach(mock_request, None, None)
assert ret == mock_response_new
serializer.create.assert_not_called()
mock_parent_relationship.wife.add.assert_called_with(get_object_or_400.return_value)
mock_response_new.assert_called_with(Response, status=status.HTTP_204_NO_CONTENT)
def test_unattach_validate_ok(self, mocker):
mock_request = mocker.MagicMock(data=dict(id=1))
serializer = SubListCreateAttachDetachAPIView()
(sub_id, res) = serializer.unattach_validate(mock_request)
assert sub_id == 1
assert res is None
def test_unattach_validate_invalid_type(self, mocker):
mock_request = mocker.MagicMock(data=dict(id='foobar'))
serializer = SubListCreateAttachDetachAPIView()
(sub_id, res) = serializer.unattach_validate(mock_request)
assert type(res) is Response
def test_unattach_validate_missing_id(self, mocker):
mock_request = mocker.MagicMock(data=dict())
serializer = SubListCreateAttachDetachAPIView()
(sub_id, res) = serializer.unattach_validate(mock_request)
assert sub_id is None
assert type(res) is Response
def test_unattach_by_id_ok(self, mocker, parent_relationship_factory, get_object_or_400):
(serializer, mock_parent_relationship) = parent_relationship_factory(SubListCreateAttachDetachAPIView, 'wife')
mock_request = mocker.MagicMock()
mock_sub = mocker.MagicMock(name="object to unattach")
get_object_or_400.return_value = mock_sub
res = serializer.unattach_by_id(mock_request, 1)
assert type(res) is Response
assert res.status_code == status.HTTP_204_NO_CONTENT
mock_parent_relationship.wife.remove.assert_called_with(mock_sub)
def test_unattach_ok(self, mocker):
mock_request = mocker.MagicMock()
mock_sub_id = mocker.MagicMock()
view = SubListCreateAttachDetachAPIView()
view.unattach_validate = mocker.MagicMock()
view.unattach_by_id = mocker.MagicMock()
view.unattach_validate.return_value = (mock_sub_id, None)
view.unattach(mock_request)
view.unattach_validate.assert_called_with(mock_request)
view.unattach_by_id.assert_called_with(mock_request, mock_sub_id)
def test_unattach_invalid(self, mocker):
mock_request = mocker.MagicMock()
mock_res = mocker.MagicMock()
view = SubListCreateAttachDetachAPIView()
view.unattach_validate = mocker.MagicMock()
view.unattach_by_id = mocker.MagicMock()
view.unattach_validate.return_value = (None, mock_res)
view.unattach(mock_request)
view.unattach_validate.assert_called_with(mock_request)
view.unattach_by_id.assert_not_called()
def test_attach_detatch_only(mocker):
mock_request = mocker.MagicMock()
mock_request.data = {'name': 'name for my new model'}
view = SubListAttachDetachAPIView()
view.model = mocker.MagicMock()
view.model._meta = mocker.MagicMock()
view.model._meta.verbose_name = "Foo Bar"
resp = view.post(mock_request)
assert 'Foo Bar' in resp.data['msg']
assert 'field is missing' in resp.data['msg']
class TestDeleteLastUnattachLabelMixin:
@mock.patch('__builtin__.super')
def test_unattach_ok(self, super, mocker):
mock_request = mocker.MagicMock()
mock_sub_id = mocker.MagicMock()
super.return_value = super
super.unattach_validate = mocker.MagicMock(return_value=(mock_sub_id, None))
super.unattach_by_id = mocker.MagicMock()
mock_model = mocker.MagicMock()
mock_model.objects.get.return_value = mock_model
mock_model.is_detached.return_value = True
view = DeleteLastUnattachLabelMixin()
view.model = mock_model
view.unattach(mock_request, None, None)
super.unattach_validate.assert_called_with(mock_request)
super.unattach_by_id.assert_called_with(mock_request, mock_sub_id)
mock_model.is_detached.assert_called_with()
mock_model.objects.get.assert_called_with(id=mock_sub_id)
mock_model.delete.assert_called_with()
@mock.patch('__builtin__.super')
def test_unattach_fail(self, super, mocker):
mock_request = mocker.MagicMock()
mock_response = mocker.MagicMock()
super.return_value = super
super.unattach_validate = mocker.MagicMock(return_value=(None, mock_response))
view = DeleteLastUnattachLabelMixin()
res = view.unattach(mock_request, None, None)
super.unattach_validate.assert_called_with(mock_request)
assert mock_response == res
class TestParentMixin:
def test_get_parent_object(self, mocker, get_object_or_404):
parent_mixin = ParentMixin()
parent_mixin.lookup_field = 'foo'
parent_mixin.kwargs = dict(foo='bar')
parent_mixin.parent_model = 'parent_model'
mock_parent_mixin = mocker.MagicMock(wraps=parent_mixin)
return_value = mock_parent_mixin.get_parent_object()
get_object_or_404.assert_called_with(parent_mixin.parent_model, **parent_mixin.kwargs)
assert get_object_or_404.return_value == return_value
class TestResourceAccessList:
def mock_request(self):
return mock.MagicMock(
user=mock.MagicMock(
is_anonymous=mock.MagicMock(return_value=False),
is_superuser=False
), method='GET')
def mock_view(self):
view = ResourceAccessList()
view.parent_model = Organization
view.kwargs = {'pk': 4}
return view
def test_parent_access_check_failed(self, mocker, mock_organization):
with mocker.patch('awx.api.permissions.get_object_or_400', return_value=mock_organization):
mock_access = mocker.MagicMock(__name__='for logger', return_value=False)
with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access):
with pytest.raises(PermissionDenied):
self.mock_view().check_permissions(self.mock_request())
mock_access.assert_called_once_with(mock_organization)
def test_parent_access_check_worked(self, mocker, mock_organization):
with mocker.patch('awx.api.permissions.get_object_or_400', return_value=mock_organization):
mock_access = mocker.MagicMock(__name__='for logger', return_value=True)
with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access):
self.mock_view().check_permissions(self.mock_request())
mock_access.assert_called_once_with(mock_organization)
def test_related_search_reverse_FK_field():
view = ListAPIView()
view.model = Credential
assert 'jobtemplates__search' in view.related_search_fields
avg_line_length: 38.322344 | max_line_length: 167 | alphanum_fraction: 0.72749

hexsha: 5323ee5f0bc84c5f271292d8f06c3d6719f38ef6 | size: 3,254 | ext: py | lang: Python
max_stars: train_model.py @ simonefinelli/ASL-Real-time-Recognition (head 3576051d3aa8ca3935ee5aeb3275ec5dec711821) | licenses: ["MIT"] | stars: 6 | events: 2021-01-11T11:32:27.000Z to 2022-03-10T00:41:26.000Z
max_issues: train_model.py @ simonefinelli/ASL-Real-time-Recognition (head 3576051d3aa8ca3935ee5aeb3275ec5dec711821) | licenses: ["MIT"] | issues: 1 | events: 2022-03-09T05:57:29.000Z to 2022-03-09T05:57:29.000Z
max_forks: train_model.py @ simonefinelli/ASL-Real-time-Recognition (head 3576051d3aa8ca3935ee5aeb3275ec5dec711821) | licenses: ["MIT"] | forks: 1 | events: 2021-08-17T13:45:00.000Z to 2021-08-17T13:45:00.000Z
content:
import os
from itertools import islice
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from data_utils import labels_to_number, videos_to_dict
from frame_generator import VideoFrameGenerator
from models import create_model_wlasl20c
# model settings
height = 224
width = 224
dim = (height, width)
batch_size = 8
frames = 10
channels = 3
output = 20
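# In other words: each generator batch built below holds 8 clips of 10 RGB frames
# resized to 224x224, and the model is created with 20 output classes.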
TRAIN_PATH = './data/train/'
VAL_PATH = './data/val/'
TEST_PATH = './data/test/'
# transform labels from string to number
labels = labels_to_number(TRAIN_PATH)
print(f'Labels: {labels}')
# load dataset as dict
y_train_dict = videos_to_dict(TRAIN_PATH, labels)
y_val_dict = videos_to_dict(VAL_PATH, labels)
y_test_dict = videos_to_dict(TEST_PATH, labels)
print(f'\nTrain set: {len(y_train_dict)} videos - with labels')
print(f'Val set: {len(y_val_dict)} videos - with labels')
print(f'Test set: {len(y_test_dict)} videos - with labels')
print(f'Train set samples: {list(islice(y_train_dict.items(), 3))}')
print(f'Val set samples: {list(islice(y_val_dict.items(), 3))}')
print(f'Test set samples: {list(islice(y_test_dict.items(), 3))}')
# get video paths (without labels)
X_train = list(y_train_dict.keys())
X_val = list(y_val_dict.keys())
X_test = list(y_test_dict.keys())
print(f'\nTrain set: {len(X_train)} videos')
print(f'Val set: {len(X_val)} videos')
print(f'Test set: {len(X_test)} videos')
print(f'Train set samples: {X_train[:4]}')
print(f'Val set samples: {X_val[:4]}')
print(f'Test set samples: {X_test[:4]}')
# instantiation of generators for train and val sets
print('\nTrain generator')
train_generator = VideoFrameGenerator(
list_IDs=X_train,
labels=y_train_dict,
batch_size=batch_size,
dim=dim,
n_channels=3,
n_sequence=frames,
shuffle=True,
type_gen='train'
)
print('\nVal generator')
val_generator = VideoFrameGenerator(
list_IDs=X_val,
labels=y_val_dict,
batch_size=batch_size,
dim=dim,
n_channels=3,
n_sequence=frames,
shuffle=True,
type_gen='val'
)
# model building
print('\nModel building and compiling . . .')
model = create_model_wlasl20c(frames, width, height, channels, output)
model.summary()
# model compiling
adam = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
amsgrad=False, name="Adam")
model.compile(loss='sparse_categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
# callbacks creation
if not os.path.isdir('./saved_models/'):
os.mkdir('./saved_models/')
# save the best model each time
path = './saved_models/'
checkpoint_cb = ModelCheckpoint(path + 'best_model.h5', save_best_only=True)
# start training
print('\nStart training . . .')
learn_epochs = 300
history = model.fit(train_generator,
validation_data=val_generator,
epochs=learn_epochs,
callbacks=[checkpoint_cb])
# save learning curves
if not os.path.isdir('./plots/'):
os.mkdir('./plots/')
print('\nSaving learning curves graph . . .')
pd.DataFrame(history.history).plot(figsize=(9, 6))
plt.grid(True)
plt.gca().set_ylim(0, 4)
plt.savefig('./plots/learning_curves.png')
avg_line_length: 28.051724 | max_line_length: 76 | alphanum_fraction: 0.710817

hexsha: b9855db19e535ccb62accb0305b2a93047b61981 | size: 5,171 | ext: py | lang: Python
max_stars: docs/conf.py @ jiayeguo/geometry_analysis (head c6c7073c9ad36d1a736e0cd8b03aac5f8f41bfb0) | licenses: ["BSD-3-Clause"] | stars: null | events: null
max_issues: docs/conf.py @ jiayeguo/geometry_analysis (head c6c7073c9ad36d1a736e0cd8b03aac5f8f41bfb0) | licenses: ["BSD-3-Clause"] | issues: null | events: null
max_forks: docs/conf.py @ jiayeguo/geometry_analysis (head c6c7073c9ad36d1a736e0cd8b03aac5f8f41bfb0) | licenses: ["BSD-3-Clause"] | forks: null | events: null
content:
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'geometry_analysis'
copyright = ("2019, Jiaye Guo. Project structure based on the "
"Computational Molecular Science Python Cookiecutter version 1.0")
author = 'Jiaye Guo'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'geometry_analysisdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'geometry_analysis.tex', 'geometry_analysis Documentation',
'geometry_analysis', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'geometry_analysis', 'geometry_analysis Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'geometry_analysis', 'geometry_analysis Documentation',
author, 'geometry_analysis', 'This is a python package for MolSSI Summer School 2019.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
avg_line_length: 31.339394 | max_line_length: 92 | alphanum_fraction: 0.65674

hexsha: 49b9b9ee59f5fdaff4046d2046ccc5f4c38ad039 | size: 733 | ext: py | lang: Python
max_stars: utils/birthday.py @ DylanTracey/Quantifying-the-Privacy-Techniques-of-Adblockers (head 0bb272beb26207b914d610fa0f66eb6543285d7f) | licenses: ["MIT"] | stars: null | events: null
max_issues: utils/birthday.py @ DylanTracey/Quantifying-the-Privacy-Techniques-of-Adblockers (head 0bb272beb26207b914d610fa0f66eb6543285d7f) | licenses: ["MIT"] | issues: 1 | events: 2019-07-27T13:55:19.000Z to 2019-07-27T13:55:19.000Z
max_forks: utils/birthday.py @ DylanTracey/Quantifying-the-Privacy-Techniques-of-Adblockers (head 0bb272beb26207b914d610fa0f66eb6543285d7f) | licenses: ["MIT"] | forks: null | events: null
content:
# from https://en.wikipedia.org/wiki/Birthday_attack#Source_code_example
from math import log1p, sqrt
from settings import r
def birthday(probability_exponent, bits):
"""
Used to estimate the probability cookie segments will clash. Then multiply this result with the likelihood that two
of the same first party sites are visited at once by users using the same public IP address.
This creates the likelihood of a misrecorded history entry.
Not used in the site. Only used to run locally for estimation.
"""
probability = 10. ** probability_exponent
print(probability)
outputs = 2. ** bits
print(outputs)
print(sqrt(2. * outputs * -log1p(-probability)))
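# The printed value is the standard birthday-bound estimate: with d = 2**bits
# equally likely outputs, roughly sqrt(2 * d * ln(1 / (1 - p))) samples are needed
# before a collision occurs with probability p, where p = 10**probability_exponent.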
birthday(-2, 10)
r.flushall()
avg_line_length: 30.541667 | max_line_length: 119 | alphanum_fraction: 0.728513

hexsha: 55f9fa17f54e9d3bc1d51b02de71ce3efd3a2a49 | size: 6,496 | ext: py | lang: Python
max_stars: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_partition_key_range_id_region_operations.py @ Mannan2812/azure-cli-extensions (head e2b34efe23795f6db9c59100534a40f0813c3d95) | licenses: ["MIT"] | stars: 2 | events: 2021-03-24T21:06:20.000Z to 2021-03-24T21:07:58.000Z
max_issues: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_partition_key_range_id_region_operations.py @ Mannan2812/azure-cli-extensions (head e2b34efe23795f6db9c59100534a40f0813c3d95) | licenses: ["MIT"] | issues: 3 | events: 2020-05-27T20:16:26.000Z to 2020-07-23T19:46:49.000Z
max_forks: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_partition_key_range_id_region_operations.py @ Mannan2812/azure-cli-extensions (head e2b34efe23795f6db9c59100534a40f0813c3d95) | licenses: ["MIT"] | forks: 5 | events: 2020-05-09T17:47:09.000Z to 2020-10-01T19:52:06.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class PartitionKeyRangeIdRegionOperations(object):
"""PartitionKeyRangeIdRegionOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the request. Constant value: "2021-03-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2021-03-01-preview"
self.config = config
def list_metrics(
self, resource_group_name, account_name, region, database_rid, collection_rid, partition_key_range_id, filter, custom_headers=None, raw=False, **operation_config):
"""Retrieves the metrics determined by the given filter for the given
partition key range id and region.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param region: Cosmos DB region, with spaces between words and each
word capitalized.
:type region: str
:param database_rid: Cosmos DB database rid.
:type database_rid: str
:param collection_rid: Cosmos DB collection rid.
:type collection_rid: str
:param partition_key_range_id: Partition Key Range Id for which to get
data.
:type partition_key_range_id: str
:param filter: An OData filter expression that describes a subset of
metrics to return. The parameters that can be filtered are name.value
(name of the metric, can have an or of multiple names), startTime,
endTime, and timeGrain. The supported operator is eq.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PartitionMetric
:rtype:
~azure.mgmt.cosmosdb.models.PartitionMetricPaged[~azure.mgmt.cosmosdb.models.PartitionMetric]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_metrics.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'region': self._serialize.url("region", region, 'str'),
'databaseRid': self._serialize.url("database_rid", database_rid, 'str'),
'collectionRid': self._serialize.url("collection_rid", collection_rid, 'str'),
'partitionKeyRangeId': self._serialize.url("partition_key_range_id", partition_key_range_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.PartitionMetricPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/region/{region}/databases/{databaseRid}/collections/{collectionRid}/partitionKeyRangeId/{partitionKeyRangeId}/metrics'}
avg_line_length: 48.118519 | max_line_length: 285 | alphanum_fraction: 0.651016

hexsha: 2f08b0790e55ffcf38737bdf17a16005d6573a1c | size: 284 | ext: py | lang: Python
max_stars: course/2of5/exercise/exercise_7_1.py @ GloryPassarello/Python (head e08df3f0ac1f6376ea08740fa7d674d68c69f448) | licenses: ["CNRI-Python", "AAL"] | stars: null | events: null
max_issues: course/2of5/exercise/exercise_7_1.py @ GloryPassarello/Python (head e08df3f0ac1f6376ea08740fa7d674d68c69f448) | licenses: ["CNRI-Python", "AAL"] | issues: null | events: null
max_forks: course/2of5/exercise/exercise_7_1.py @ GloryPassarello/Python (head e08df3f0ac1f6376ea08740fa7d674d68c69f448) | licenses: ["CNRI-Python", "AAL"] | forks: null | events: null
content:
file_name = raw_input("Enter a file with the absolute path: ")
#file_text = open('/home/anabrs1/Python_material/code/mbox-short.txt')
try:
file_text = open(file_name)
except:
print "You should insert the name with the path."
exit()
inp = file_text.read()
INP = inp.upper()
print INP
avg_line_length: 28.4 | max_line_length: 70 | alphanum_fraction: 0.735915

hexsha: 263c78e11ddce5b64bb1483f049907a7db6040f7 | size: 525 | ext: py | lang: Python
max_stars: danceschool/discounts/migrations/0011_auto_20210127_2052.py @ django-danceschool/django-danceschool (head 65ae09ffdcb0821e82df0e1f634fe13c0384a525) | licenses: ["BSD-3-Clause"] | stars: 32 | events: 2017-09-12T04:25:25.000Z to 2022-03-21T10:48:07.000Z
max_issues: danceschool/discounts/migrations/0011_auto_20210127_2052.py @ django-danceschool/django-danceschool (head 65ae09ffdcb0821e82df0e1f634fe13c0384a525) | licenses: ["BSD-3-Clause"] | issues: 97 | events: 2017-09-01T02:43:08.000Z to 2022-01-03T18:20:34.000Z
max_forks: danceschool/discounts/migrations/0011_auto_20210127_2052.py @ django-danceschool/django-danceschool (head 65ae09ffdcb0821e82df0e1f634fe13c0384a525) | licenses: ["BSD-3-Clause"] | forks: 19 | events: 2017-09-26T13:34:46.000Z to 2022-03-21T10:48:10.000Z
content:
# Generated by Django 2.2.17 on 2021-01-28 01:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('discounts', '0010_merge_20191028_1925'),
]
operations = [
migrations.AddField(
model_name='registrationdiscount',
name='applied',
field=models.BooleanField(null=True, verbose_name='Use finalized'),
),
migrations.DeleteModel(
name='TemporaryRegistrationDiscount',
),
]
avg_line_length: 23.863636 | max_line_length: 79 | alphanum_fraction: 0.619048

hexsha: e4e076189eebfb4baa2277aa3e45531ac91ba2e7 | size: 3,348 | ext: py | lang: Python
max_stars: guardian/testapp/tests/conf.py @ ellmetha/django-guardian (head af822d75113d048c7686b5b8790a57113c9ef604) | licenses: ["BSD-2-Clause"] | stars: null | events: null
max_issues: guardian/testapp/tests/conf.py @ ellmetha/django-guardian (head af822d75113d048c7686b5b8790a57113c9ef604) | licenses: ["BSD-2-Clause"] | issues: null | events: null
max_forks: guardian/testapp/tests/conf.py @ ellmetha/django-guardian (head af822d75113d048c7686b5b8790a57113c9ef604) | licenses: ["BSD-2-Clause"] | forks: null | events: null
content:
from __future__ import unicode_literals
import os
import django
from guardian.compat import unittest
from guardian.utils import abspath
from django.conf import settings
from django.conf import UserSettingsHolder
from django.utils.functional import wraps
THIS = abspath(os.path.dirname(__file__))
TEST_TEMPLATES_DIR = abspath(THIS, 'templates')
TEST_SETTINGS = dict(
TEMPLATE_DIRS=[TEST_TEMPLATES_DIR],
)
def skipUnlessTestApp(obj):
app = 'guardian.testapp'
return unittest.skipUnless(app in settings.INSTALLED_APPS,
'app %r must be installed to run this test' % app)(obj)
def skipUnlessSupportsCustomUser(obj):
# XXX: Following fixes problem with Python 2.6 and Django 1.2
gte15 = django.VERSION >= (1, 5)
if not gte15:
return lambda *args, **kwargs: None
# XXX: End of the workaround
return unittest.skipUnless(django.VERSION >= (1, 5), 'Must have Django 1.5 or greater')(obj)
class TestDataMixin(object):
def setUp(self):
super(TestDataMixin, self).setUp()
from django.contrib.auth.models import Group
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
Group.objects.create(pk=1, name='admins')
jack_group = Group.objects.create(pk=2, name='jackGroup')
User.objects.get_or_create(pk=settings.ANONYMOUS_USER_ID)
jack = User.objects.create(pk=1, username='jack', is_active=True,
is_superuser=False, is_staff=False)
jack.groups.add(jack_group)
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import TransactionTestCase
if isinstance(test_func, type) and issubclass(test_func, TransactionTestCase):
original_pre_setup = test_func._pre_setup
original_post_teardown = test_func._post_teardown
def _pre_setup(innerself):
self.enable()
original_pre_setup(innerself)
def _post_teardown(innerself):
original_post_teardown(innerself)
self.disable()
test_func._pre_setup = _pre_setup
test_func._post_teardown = _post_teardown
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def enable(self):
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
def disable(self):
settings._wrapped = self.wrapped
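# Usage sketch for override_settings, mirroring its docstring (the setting name is
# only an example):
#
#     @override_settings(ANONYMOUS_USER_ID=1)
#     def test_something(self):
#         ...
#
#     with override_settings(ANONYMOUS_USER_ID=1):
#         ...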
avg_line_length: 33.818182 | max_line_length: 96 | alphanum_fraction: 0.663082

hexsha: 09dc8962a98bb0f8a98e9fecbcc6fd9949cfc86f | size: 3,198 | ext: py | lang: Python
max_stars: inferelator/tests/artifacts/test_stubs.py @ gs512/inferelator (head 391223bd8d07476db72c4c7b1cd5fb5bf7494b9c) | licenses: ["BSD-2-Clause"] | stars: 1 | events: 2019-05-13T23:12:48.000Z to 2019-05-13T23:12:48.000Z
max_issues: inferelator/tests/artifacts/test_stubs.py @ gs512/inferelator (head 391223bd8d07476db72c4c7b1cd5fb5bf7494b9c) | licenses: ["BSD-2-Clause"] | issues: null | events: null
max_forks: inferelator/tests/artifacts/test_stubs.py @ gs512/inferelator (head 391223bd8d07476db72c4c7b1cd5fb5bf7494b9c) | licenses: ["BSD-2-Clause"] | forks: null | events: null
content:
from inferelator import amusr_workflow
from inferelator import workflow
from inferelator.regression.base_regression import RegressionWorkflow
from inferelator.postprocessing.results_processor import ResultsProcessor
from inferelator.tests.artifacts.test_data import TestDataSingleCellLike, TEST_DATA, TEST_DATA_SPARSE
from inferelator.utils import InferelatorData
import pandas as pd
import numpy as np
class NoOutputRP(ResultsProcessor):
def summarize_network(self, output_dir, gold_standard, priors):
return super(NoOutputRP, self).summarize_network(None, gold_standard, priors)
# Factory method to spit out a puppet workflow
def create_puppet_workflow(regression_class=RegressionWorkflow,
base_class=workflow.WorkflowBase,
result_processor_class=NoOutputRP):
puppet_parent = workflow._factory_build_inferelator(regression=regression_class, workflow=base_class)
class PuppetClass(puppet_parent):
"""
Standard workflow except it takes all the data as references to __init__ instead of as filenames on disk or
as environment variables, and returns the model AUPR and edge counts without writing files (unless told to)
"""
write_network = True
network_file_name = None
pr_curve_file_name = None
initialize_mp = False
def __init__(self, data, prior_data, gs_data):
self.data = data
self.priors_data = prior_data
self.gold_standard = gs_data
def startup_run(self):
# Skip all of the data loading
self.process_priors_and_gold_standard()
def create_output_dir(self, *args, **kwargs):
pass
return PuppetClass
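# Illustrative use of the factory above (hypothetical variable names; the argument
# order follows PuppetClass.__init__ as defined above):
#   PuppetWF = create_puppet_workflow(regression_class=FakeRegression)
#   wf = PuppetWF(data, prior_data, gs_data)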
class TaskDataStub(amusr_workflow.create_task_data_class(workflow_class="single-cell")):
priors_data = TestDataSingleCellLike.priors_data
tf_names = TestDataSingleCellLike.tf_names
meta_data_task_column = "Condition"
tasks_from_metadata = True
task_name = "TestStub"
task_workflow_type = "single-cell"
def __init__(self, sparse=False):
self.data = TEST_DATA.copy() if not sparse else TEST_DATA_SPARSE.copy()
super(TaskDataStub, self).__init__()
def get_data(self):
if self.tasks_from_metadata:
return self.separate_tasks_by_metadata()
else:
return [self]
class FakeDRD:
def __init__(self, *args, **kwargs):
pass
def run(self, expr, meta):
return expr, expr, expr
class FakeWriter(object):
def writerow(self, *args, **kwargs):
pass
class FakeRegression(RegressionWorkflow):
def run_regression(self):
beta = [pd.DataFrame(np.array([[0, 1], [0.5, 0.05]]), index=['gene1', 'gene2'], columns=['tf1', 'tf2'])]
beta_resc = [pd.DataFrame(np.array([[0, 1], [1, 0.05]]), index=['gene1', 'gene2'], columns=['tf1', 'tf2'])]
return beta, beta_resc
def run_bootstrap(self, bootstrap):
return True
class FakeResultProcessor:
network_data = None
def __init__(self, *args, **kwargs):
pass
def summarize_network(self, *args, **kwargs):
return 1, 0, 0
| 30.75
| 115
| 0.688243
|
f495edcee4d902fae91fa76d6249eae3a265806a
| 13,954
|
py
|
Python
|
yolo_Mobilenet.py
|
dashings/keras-YOLOv3-mobilenet
|
a7f43f08acb24c31e2061b14a8c34f8e7d1dd18a
|
[
"MIT"
] | 629
|
2018-09-05T20:05:04.000Z
|
2022-03-21T11:25:02.000Z
|
yolo_Mobilenet.py
|
dashings/keras-YOLOv3-mobilenet
|
a7f43f08acb24c31e2061b14a8c34f8e7d1dd18a
|
[
"MIT"
] | 61
|
2018-09-10T21:05:54.000Z
|
2021-05-27T10:46:00.000Z
|
yolo_Mobilenet.py
|
dashings/keras-YOLOv3-mobilenet
|
a7f43f08acb24c31e2061b14a8c34f8e7d1dd18a
|
[
"MIT"
] | 205
|
2018-09-06T07:02:36.000Z
|
2022-03-18T21:25:43.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run a YOLO_v3 style detection model on test images.
"""
import colorsys
import os
from timeit import default_timer as timer
import tensorflow as tf
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model_Mobilenet import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from keras.utils import multi_gpu_model
gpu_num=1
class YOLO(object):
def __init__(self):
self.model_path = 'logs/carMobilenet/001_Mobilenet_finetune/trained_weights_final.h5' # model path or trained weights path
self.anchors_path = 'model_data/yolo_anchors.txt'
self.classes_path = 'model_data/car_classes.txt'
self.score = 0.3
self.iou = 0.45
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.model_image_size = (320, 320) # fixed size or (None, None), hw
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
'''to generate the bounding boxes'''
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
# hsv_tuples = [(x / len(self.class_names), 1., 1.)
# for x in range(len(self.class_names))]
# self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
# self.colors = list(
# map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
# self.colors))
# np.random.seed(10101) # Fixed seed for consistent colors across runs.
# np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
# np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
# default arg
# self.yolo_model->'model_data/yolo.h5'
# self.anchors->'model_data/yolo_anchors.txt'-> 9 scales for anchors
return boxes, scores, classes
def detect_image(self, image):
# start = timer()
rects = []
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
# print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
# tf.Session.run(fetches, feed_dict=None)
# Runs the operations and evaluates the tensors in fetches.
#
# Args:
# fetches: A single graph element, or a list of graph elements(described above).
#
# feed_dict: A dictionary that maps graph elements to values(described above).
#
# Returns:Either a single value if fetches is a single graph element, or a
# list of values if fetches is a list(described above).
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
# print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
# font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
# size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
# thickness = (image.size[0] + image.size[1]) // 500
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
# label = '{} {:.2f}'.format(predicted_class, score)
# draw = ImageDraw.Draw(image)
# label_size = draw.textsize(label, font)
y1, x1, y2, x2 = box
y1 = max(0, np.floor(y1 + 0.5).astype('float32'))
x1 = max(0, np.floor(x1 + 0.5).astype('float32'))
y2 = min(image.size[1], np.floor(y2 + 0.5).astype('float32'))
x2 = min(image.size[0], np.floor(x2 + 0.5).astype('float32'))
# print(label, (x1, y1), (x2, y2))
bbox = dict([("score",str(score)),("x1",str(x1)),("y1", str(y1)),("x2", str(x2)),("y2", str(y2))])
rects.append(bbox)
# if y1 - label_size[1] >= 0:
# text_origin = np.array([x1, y1 - label_size[1]])
# else:
# text_origin = np.array([x1, y1 + 1])
#
# # My kingdom for a good redistributable image drawing library.
# for i in range(thickness):
# draw.rectangle(
# [x1 + i, y1 + i, x2 - i, y2 - i],
# outline=self.colors[c])
# draw.rectangle(
# [tuple(text_origin), tuple(text_origin + label_size)],
# fill=self.colors[c])
# draw.text(text_origin, label, fill=(0, 0, 0), font=font)
# del draw
#
# end = timer()
# print(str(end - start))
return rects
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
def detect_img(yolo):
while True:
img = input('Input image filename:')
try:
image = Image.open(img)
except:
print('Open Error! Try again!')
continue
else:
r_image = yolo.detect_image(image)
r_image.show()
yolo.close_session()
def detect_test_draw(yolo,json_name,test_pic):
import cv2
import json
data_dst = 'dataset/brainwash/'
with open(json_name) as load_f:
load_dict = json.load(load_f)
for pic in load_dict:
picname = pic['image_path']
root,name = os.path.split(picname)
print(name)
image = Image.open(data_dst + picname)
rects = yolo.detect_image(image)
frame = cv2.imread(data_dst+picname)
for rect in rects:
score, x1, y1, x2, y2 = float(rect['score']),int(float(rect['x1'])),int(float(rect['y1'])),int(float(rect['x2'])),int(float(rect['y2']))
cv2.rectangle(frame,(x1,y1),(x2,y2),(255,255,255),1)
cv2.imwrite(test_pic+name,frame)
yolo.close_session()
def detect_test(yolo,json_name,test_out_json = 'caltech_new_result_0.001.json',data_dst = '../caltech_ped/caltech-pedestrian-dataset-converter/'):
import json
import time
#
with open(json_name) as load_f:
load_dict = json.load(load_f)
count = 0
json_images=[]
with open(test_out_json,'w') as outfile:
time_start = time.time()
for pic in load_dict:
# root, filename = os.path.split(pic['image_path'])
# name = filename.split('.')[0]
# set_id, v_id, frame_id = name.split('_')
# frame_id = int(frame_id)
#
# if frame_id % 30 == 0 and frame_id != 0:
picname = pic['image_path'][2:]
count +=1
print(picname)
image = Image.open(data_dst + picname)
rects = yolo.detect_image(image)
json_image = dict([("image_path", picname), ("rects", rects)])
json_images.append(json_image)
time_end = time.time()
duration = time_end - time_start
print('totally cost', duration)
print('{} pictures , average time {}'.format(count,duration/count))
        json_str = json.dumps(json_images, indent=4)  # renamed from `str` to avoid shadowing the built-in
        outfile.write(json_str)
outfile.close()
yolo.close_session()
def car_detect(yolo,mainFolder = '/home/wenwen/Viewnyx/FrameImages/'):
import json
fold_list = range(1, 15)
for i in fold_list:
foldname = mainFolder+'video'+str(i)
        list = os.listdir(foldname)  # list all files and directories under the folder
json_all = {}
json_f = open('car/'+'annotation_{}_YOLOv3.json'.format('video'+str(i)),'w')
for i in range(0, len(list)):
name,ext = os.path.splitext(list[i])
if ext=='.jpg':
print(list[i])
json_pic = {}
annotation = []
image = Image.open(foldname+'/'+list[i])
rects = yolo.detect_image(image)
for rect in rects:
score, x1, y1, x2, y2 = float(rect['score']), int(float(rect['x1'])), int(float(rect['y1'])), int(
float(rect['x2'])), int(float(rect['y2']))
bbox = {"category": "sideways",
"id": 0,
"shape": ["Box",1],
"label": "car",
"x":x1,
"y":y1,
"width":x2-x1,
"height":y2-y1,
"score":score}
annotation.append(bbox)
json_pic["annotations"]=annotation
json_pic["height"] = 480
json_pic["name"] = list[i]
json_pic["width"] = 640
json_all[list[i]] = json_pic
json_f.write(json.dumps(json_all,indent=4))
json_f.close()
yolo.close_session()
if __name__ == '__main__':
car_detect(YOLO())
#detect_test(YOLO(), json_name='../mrsub/mrsub_test.json',test_out_json='mobilenet_train_bw_test_mrsub.json', data_dst='../mrsub/')
#detect_test_draw(YOLO(), json_name='dataset/brainwash/test_boxes.json',test_pic='./mobilenet_test/')
| 41.778443
| 153
| 0.561416
|
c5108848d0cf13fc68ff8b17d69b0c030d413ddf
| 4,383
|
py
|
Python
|
src/ostorlab/runtimes/local/services/mq.py
|
ju-c/ostorlab
|
92c6edc204f3a0c32d0f28a37010cbeddb1818db
|
[
"Apache-2.0"
] | null | null | null |
src/ostorlab/runtimes/local/services/mq.py
|
ju-c/ostorlab
|
92c6edc204f3a0c32d0f28a37010cbeddb1818db
|
[
"Apache-2.0"
] | null | null | null |
src/ostorlab/runtimes/local/services/mq.py
|
ju-c/ostorlab
|
92c6edc204f3a0c32d0f28a37010cbeddb1818db
|
[
"Apache-2.0"
] | null | null | null |
"""RabbitMQ service in charge of routing Agent messages."""
import binascii
import logging
import os
from typing import Dict
import docker
import tenacity
from docker import errors
from docker import types
from docker.models import services
logger = logging.getLogger(__name__)
MQ_IMAGE = 'rabbitmq:3.9-management'
class LocalRabbitMQ:
"""RabbitMQ service spawned a docker swarm service."""
def __init__(self,
name: str,
network: str,
exposed_ports: Dict[int, int] = None,
image: str = MQ_IMAGE) -> None:
"""Initialize the MQ service parameters.
Args:
name: Name of the service.
network: Network used for the Docker MQ service.
exposed_ports: The list of MQ service exposed ports
image: MQ Docker image
"""
self._name = name
self._docker_client = docker.from_env()
# images
self._mq_image = image
self._network = network
self._mq_host = f'mq_{self._name}'
# service
self._mq_service = None
# exposed_port
self._exposed_ports = exposed_ports
@property
def url(self) -> str:
"""URL to connect to the local RabbitMQ instance."""
return f'amqp://guest:guest@{self._mq_host}:5672/'
@property
def vhost(self):
"""Default vhost."""
return '/'
@property
def service(self):
return self._mq_service
@property
def management_url(self) -> str:
"""URL to connect to the management interface of the RabbitMQ instance."""
return f'http://guest:guest@{self._mq_host}:15672/'
def start(self) -> None:
"""Start local rabbit mq instance."""
self._create_network()
self._mq_service = self._start_mq()
if not self._is_service_healthy():
logger.error('MQ container for service %s is not ready', self._mq_service.id)
return
def stop(self):
for service in self._docker_client.services.list():
universe = service.attrs['Spec']['Labels'].get('ostorlab.universe')
if universe is not None and service.name.startswith('mq_') and self._name in universe:
service.remove()
def _create_network(self):
if any(network.name == self._network for network in self._docker_client.networks.list()):
logger.warning('network already exists.')
else:
logger.info('creating private network %s', self._network)
return self._docker_client.networks.create(
name=self._network,
driver='overlay',
attachable=True,
labels={'ostorlab.universe': self._name},
check_duplicate=True
)
def _start_mq(self) -> services.Service:
logger.info('starting MQ')
endpoint_spec = types.services.EndpointSpec(mode='vip', ports=self._exposed_ports)
service_mode = types.services.ServiceMode('replicated', replicas=1)
return self._docker_client.services.create(
image=self._mq_image,
networks=[self._network],
name=self._mq_host,
env=[
'TASK_ID={{.Task.Slot}}',
f'MQ_SERVICE_NAME={self._mq_host}',
f'RABBITMQ_ERLANG_COOKIE={binascii.hexlify(os.urandom(10)).decode()}',
],
restart_policy=types.RestartPolicy(condition='any'),
mode=service_mode,
labels={'ostorlab.universe': self._name, 'ostorlab.mq': ''},
endpoint_spec=endpoint_spec)
@tenacity.retry(stop=tenacity.stop_after_attempt(20),
wait=tenacity.wait_exponential(multiplier=1, max=12),
# return last value and don't raise RetryError exception.
retry_error_callback=lambda lv: lv.outcome.result(),
retry=tenacity.retry_if_result(lambda v: v is False))
def _is_service_healthy(self) -> bool:
logger.info('checking service %s', self._mq_service.name)
return self.is_healthy
@property
def is_healthy(self) -> bool:
try:
return len([task for task in self._mq_service.tasks() if task['Status']['State'] == 'running']) == 1
except errors.DockerException:
return False
| 35.346774
| 112
| 0.605749
|
4ccabd2598072f4daaa1494a93390120864b9529
| 2,621
|
py
|
Python
|
tools/create_live_composite.py
|
bitsawer/renpy-shader
|
6c750689a3d7952494a3b98a3297762bb4933308
|
[
"MIT"
] | 45
|
2016-10-04T05:03:23.000Z
|
2022-02-09T13:20:38.000Z
|
tools/create_live_composite.py
|
bitsawer/renpy-shader
|
6c750689a3d7952494a3b98a3297762bb4933308
|
[
"MIT"
] | 4
|
2016-10-04T13:35:15.000Z
|
2020-07-13T10:46:31.000Z
|
tools/create_live_composite.py
|
bitsawer/renpy-shader
|
6c750689a3d7952494a3b98a3297762bb4933308
|
[
"MIT"
] | 10
|
2017-02-16T04:36:53.000Z
|
2021-04-10T08:31:29.000Z
|
"""
Helper script for cropping images and creating a RenPy LiveComposite for them.
Quite specific and mostly useful for processing images exported from a
rendering program like Blender or from Photoshop layers.
Requires Pillow Python image processing library to be installed.
Command line example (current working directory at the base of this project):
python tools/create_live_composite.py ShaderDemo/game/images/doll
This assumes all images in the source directory have the same size. The script
crops them and creates an efficient LiveComposite that can be used for rigging
or just normally. The resulting LiveComposite is written into a .rpy-file
in the target directory.
"""
import sys
import os
from PIL import Image
IMAGES = ["png", "jpg"]
POSTFIX = "crop"
PAD = 5
sourceDir = sys.argv[1]
sourceImages = [os.path.join(sourceDir, name) for name in os.listdir(sourceDir) if name.lower().split(".")[-1] in IMAGES]
sourceImages.sort()
def findValidImages(images):
valid = []
size = None
for path in sourceImages:
image = Image.open(path)
if POSTFIX and POSTFIX in path.lower():
print("Skipping already cropped: %s" % path)
elif size is None or image.size == size:
size = image.size
valid.append((path, image))
else:
print("Image %s has size %s, should be %s? Skipped." % (path, str(image.size), str(size)))
return valid
def getCropRect(image):
x = 0
y = 0
x2 = image.size[0]
y2 = image.size[1]
box = image.getbbox()
if box:
return max(box[0] - PAD, 0), max(box[1] - PAD, 0), min(box[2] + PAD, image.size[0]), min(box[3] + PAD, image.size[1])
return x, y, x2, y2
def createName(path):
parts = path.rsplit(".", 1)
return parts[0] + POSTFIX + "." + parts[1]
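# e.g. createName("arm.png") -> "armcrop.png" when POSTFIX is "crop" (illustrative filename).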
results = []
for path, image in findValidImages(sourceImages):
rect = getCropRect(image)
cropped = image.crop(rect)
name = createName(path)
cropped.save(name)
print("Saved: %s. Cropped: %s" % (name, str(rect)))
results.append((name, image, rect))
name = os.path.normcase(sourceDir).split(os.sep)[-1]
with open(os.path.join(sourceDir, name + ".rpy"), "w") as f:
base = results[0]
f.write("#Automatically generated file\n\n")
f.write("image %s = LiveComposite(\n" % name)
f.write(" (%i, %i),\n" % base[1].size)
for result in results:
name, image, crop = result
name = name[name.find("images"):].replace("\\", "/")
f.write(" (%i, %i), \"%s\",\n" % (crop[0], crop[1], name))
f.write(")\n")
| 32.7625
| 125
| 0.634872
|
3ff923b244efc6378c662d1f9e2fa1e4bbc9bf72
| 4,465
|
py
|
Python
|
examples/dump.py
|
joke325/Py3rop
|
e749715b61d805a838c75eb750480ae7d15944fd
|
[
"BSD-2-Clause"
] | null | null | null |
examples/dump.py
|
joke325/Py3rop
|
e749715b61d805a838c75eb750480ae7d15944fd
|
[
"BSD-2-Clause"
] | null | null | null |
examples/dump.py
|
joke325/Py3rop
|
e749715b61d805a838c75eb750480ae7d15944fd
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2020 Janky <box@janky.tech>
# All right reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# Inspired by https://github.com/rnpgp/rnp/blob/master/src/examples/dump.c
import sys
import getopt
import os.path
from pyrop.bind import RopBind
from pyrop.error import RopError
def print_usage(program_name):
sys.stderr.write(
"Program dumps PGP packets. \n\nUsage:\n"
"\t%s [-d|-h] [input.pgp]\n"
"\t -d : indicates whether to print packet content. Data is represented as hex\n"
"\t -m : dump mpi values\n"
"\t -g : dump key fingerprints and grips\n"
"\t -j : JSON output\n"
"\t -h : prints help and exists\n" %
os.path.basename(program_name))
def stdin_reader(app_ctx, len_):
return sys.stdin.read(len_)
def stdout_writer(app_ctx, buf):
try:
sys.stdout.write(buf.decode())
return True
except IOError: pass
return False
def execute(argv, json_out=None):
input_file = None
raw = False
mpi = False
grip = False
json = False
help_ = (len(argv) < 2)
''' Parse command line options:
-i input_file [mandatory]: specifies name of the file with PGP packets
    -d : indicates whether to dump whole packet content
-m : dump mpi contents
-g : dump key grips and fingerprints
-j : JSON output
    -h : prints help and exits
'''
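    # Example invocation (illustrative file name): ./dump.py -d -m -g input.pgp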
opts, args = getopt.getopt(argv[1:], 'dmgjh')
for optt in opts:
for opt in optt:
if opt == '-d':
raw = True
elif opt == '-m':
mpi = True
elif opt == '-g':
grip = True
elif opt == '-j':
json = True
elif len(opt) > 0:
help_ = True
if not help_:
if len(args) > 0:
input_file = args[0]
rop = RopBind()
try:
try:
if input_file is not None:
input_ = rop.create_input(path=input_file)
else:
input_ = rop.create_input(reader=stdin_reader)
except RopError as err:
print("Failed to open source: error {}".format(hex(err.err_code)))
raise
if not json:
try:
output = rop.create_output(writer=stdout_writer)
except RopError as err:
print("Failed to open stdout: error {}".format(hex(err.err_code)))
raise
input_.dump_packets_to_output(output, mpi=mpi, raw=raw, grip=grip)
else:
jsn = input_.dump_packets_to_json(mpi=mpi, raw=raw, grip=grip)
if json_out is None:
print(jsn)
print('')
else:
json_out.append(jsn)
except RopError as err:
            # Inform in case an error occurred during parsing
print("Operation failed [error code: {}]".format(hex(err.err_code)))
raise
finally:
rop.close()
else:
print_usage(argv[0])
if __name__ == '__main__':
execute(sys.argv)
| 34.882813
| 90
| 0.606271
|
37e48938505b864e267daa0716d612c1171bbe4b
| 3,735
|
py
|
Python
|
h2o-py/tests/testdir_parser/pyunit_PUBDEV_5705_drop_columns_parser_gz_large.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2
|
2019-09-02T15:49:45.000Z
|
2019-09-02T16:01:58.000Z
|
h2o-py/tests/testdir_parser/pyunit_PUBDEV_5705_drop_columns_parser_gz_large.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2
|
2021-06-02T02:24:03.000Z
|
2021-11-15T17:51:49.000Z
|
h2o-py/tests/testdir_parser/pyunit_PUBDEV_5705_drop_columns_parser_gz_large.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 1
|
2021-05-23T07:41:39.000Z
|
2021-05-23T07:41:39.000Z
|
from __future__ import print_function
import sys
sys.path.insert(1, "../../")
import h2o
from tests import pyunit_utils
import random
def import_zip_skipped_columns():
# checking out zip file
airlineFull = h2o.import_file(path=pyunit_utils.locate("smalldata/jira/adult.gz"))
filePath = pyunit_utils.locate("smalldata/jira/adult.gz")
skip_all = list(range(airlineFull.ncol))
skip_even = list(range(0, airlineFull.ncol, 2))
skip_odd = list(range(1, airlineFull.ncol, 2))
skip_start_end = [0, airlineFull.ncol - 1]
skip_except_last = list(range(0, airlineFull.ncol - 2))
skip_except_first = list(range(1, airlineFull.ncol))
temp = list(range(0, airlineFull.ncol))
random.shuffle(temp)
skip_random = []
for index in range(0, airlineFull.ncol // 2):
skip_random.append(temp[index])
skip_random.sort()
try:
bad = h2o.import_file(filePath, skipped_columns=skip_all) # skipped all
sys.exit(1)
except Exception as ex:
print(ex)
pass
try:
bad = h2o.upload_file(filePath, skipped_columns=skip_all) # skipped all
sys.exit(1)
except Exception as ex:
print(ex)
pass
# skip odd columns
pyunit_utils.checkCorrectSkips(airlineFull, filePath, skip_odd)
# skip even columns
pyunit_utils.checkCorrectSkips(airlineFull, filePath, skip_even)
# skip the very beginning and the very end.
pyunit_utils.checkCorrectSkips(airlineFull, filePath, skip_start_end)
# skip all except the last column
pyunit_utils.checkCorrectSkips(airlineFull, filePath, skip_except_last)
# skip all except the very first column
pyunit_utils.checkCorrectSkips(airlineFull, filePath, skip_except_first)
# randomly skipped half the columns
pyunit_utils.checkCorrectSkips(airlineFull, filePath, skip_random)
def checkCorrectSkips(originalFullFrame, csvfile, skipped_columns):
skippedFrameUF = h2o.upload_file(csvfile, skipped_columns=skipped_columns)
    skippedFrameIF = h2o.import_file(csvfile, skipped_columns=skipped_columns)  # these two frames should be the same
pyunit_utils.compare_frames_local(skippedFrameUF, skippedFrameIF, prob=0.5)
skipCounter = 0
typeDict = originalFullFrame.types
frameNames = originalFullFrame.names
for cindex in range(len(frameNames)):
if cindex not in skipped_columns:
print("Checking column {0}...".format(cindex))
if typeDict[frameNames[cindex]] == u'enum' and cindex==10: # look at original frame
continue
elif typeDict[frameNames[cindex]] == u'enum' and not(skipCounter==10):
pyunit_utils.compare_frames_local_onecolumn_NA_enum(originalFullFrame[cindex],
skippedFrameIF[skipCounter], prob=1, tol=1e-10,
returnResult=False)
elif typeDict[frameNames[cindex]] == u'string':
pyunit_utils.compare_frames_local_onecolumn_NA_string(originalFullFrame[cindex],
skippedFrameIF[skipCounter], prob=1,
returnResult=False)
elif typeDict[frameNames[cindex]] == u'int':
pyunit_utils.compare_frames_local_onecolumn_NA(originalFullFrame[cindex], skippedFrameIF[skipCounter].asnumeric(),
prob=1, tol=1e-10, returnResult=False)
skipCounter = skipCounter + 1
if __name__ == "__main__":
pyunit_utils.standalone_test(import_zip_skipped_columns)
else:
import_zip_skipped_columns()
| 41.043956
| 130
| 0.658099
|
d0e0bdb2114c615be1cee968a97083d3cc2a5e3a
| 1,701
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
Caiphe/recipe-app-api
|
f7a9b9b1a2319f54b20779e521bcec94a1c7dae7
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
Caiphe/recipe-app-api
|
f7a9b9b1a2319f54b20779e521bcec94a1c7dae7
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
Caiphe/recipe-app-api
|
f7a9b9b1a2319f54b20779e521bcec94a1c7dae7
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2020-08-01 04:38
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.029412
| 266
| 0.637272
|
d768c8cb6c4972e7698729d195817b541afa6a39
| 1,378
|
py
|
Python
|
numba/cuda/tests/cudapy/test_nondet.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 6,620
|
2015-01-04T08:51:04.000Z
|
2022-03-31T12:52:18.000Z
|
numba/cuda/tests/cudapy/test_nondet.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 6,457
|
2015-01-04T03:18:41.000Z
|
2022-03-31T17:38:42.000Z
|
numba/cuda/tests/cudapy/test_nondet.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 930
|
2015-01-25T02:33:03.000Z
|
2022-03-30T14:10:32.000Z
|
import numpy as np
from numba import cuda, float32, void
from numba.cuda.testing import unittest, CUDATestCase
def generate_input(n):
A = np.array(np.arange(n * n).reshape(n, n), dtype=np.float32)
B = np.array(np.arange(n) + 0, dtype=A.dtype)
return A, B
class TestCudaNonDet(CUDATestCase):
def test_for_pre(self):
"""Test issue with loop not running due to bad sign-extension at the for loop
precondition.
"""
@cuda.jit(void(float32[:, :], float32[:, :], float32[:]))
def diagproduct(c, a, b):
startX, startY = cuda.grid(2)
gridX = cuda.gridDim.x * cuda.blockDim.x
gridY = cuda.gridDim.y * cuda.blockDim.y
height = c.shape[0]
width = c.shape[1]
for x in range(startX, width, (gridX)):
for y in range(startY, height, (gridY)):
c[y, x] = a[y, x] * b[x]
N = 8
A, B = generate_input(N)
F = np.empty(A.shape, dtype=A.dtype)
blockdim = (32, 8)
griddim = (1, 1)
dA = cuda.to_device(A)
dB = cuda.to_device(B)
dF = cuda.to_device(F, copy=False)
diagproduct[griddim, blockdim](dF, dA, dB)
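        # Reference result: A @ diag(B) scales column x of A by B[x], which is exactly
        # what the kernel computes above as c[y, x] = a[y, x] * b[x].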
E = np.dot(A, np.diag(B))
np.testing.assert_array_almost_equal(dF.copy_to_host(), E)
if __name__ == '__main__':
unittest.main()
| 27.56
| 85
| 0.560958
|
cffcad09e26f08a28883039a50bd11651bc83a4b
| 6,831
|
py
|
Python
|
holoviews/plotting/plotly/callbacks.py
|
petros1999/holoviews
|
7499b421f349d141c7c05b18517198e0833773cf
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/plotting/plotly/callbacks.py
|
petros1999/holoviews
|
7499b421f349d141c7c05b18517198e0833773cf
|
[
"BSD-3-Clause"
] | 1
|
2021-04-17T15:31:36.000Z
|
2021-04-17T15:31:36.000Z
|
holoviews/plotting/plotly/callbacks.py
|
petros1999/holoviews
|
7499b421f349d141c7c05b18517198e0833773cf
|
[
"BSD-3-Clause"
] | 1
|
2020-06-02T09:16:36.000Z
|
2020-06-02T09:16:36.000Z
|
from weakref import WeakValueDictionary
from param.parameterized import add_metaclass
from ...streams import (
Stream, Selection1D, RangeXY, RangeX, RangeY, BoundsXY, BoundsX, BoundsY,
SelectionXY
)
from .util import _trace_to_subplot
class PlotlyCallbackMetaClass(type):
"""
Metaclass for PlotlyCallback classes.
We want each callback class to keep track of all of the instances of the class.
Using a meta class here lets us keep the logic for instance tracking in one place.
"""
def __init__(cls, name, bases, attrs):
super(PlotlyCallbackMetaClass, cls).__init__(name, bases, attrs)
# Create weak-value dictionary to hold instances of the class
cls.instances = WeakValueDictionary()
def __call__(cls, *args, **kwargs):
inst = super(PlotlyCallbackMetaClass, cls).__call__(*args, **kwargs)
# Store weak reference to the callback instance in the _instances
# WeakValueDictionary. This will allow instances to be garbage collected and
        # the references will be automatically removed from the collection when this
# happens.
cls.instances[inst.plot.trace_uid] = inst
return inst
@add_metaclass(PlotlyCallbackMetaClass)
class PlotlyCallback(object):
def __init__(self, plot, streams, source, **params):
self.plot = plot
self.streams = streams
self.source = source
@classmethod
def update_streams_from_property_update(cls, property_value, fig_dict):
raise NotImplementedError()
class Selection1DCallback(PlotlyCallback):
callback_property = "selected_data"
@classmethod
def update_streams_from_property_update(cls, selected_data, fig_dict):
traces = fig_dict.get('data', [])
# build event data and compute which trace UIDs are eligible
# Look up callback with UID
# graph reference and update the streams
point_inds = {}
if selected_data:
for point in selected_data['points']:
point_inds.setdefault(point['curveNumber'], [])
point_inds[point['curveNumber']].append(point['pointNumber'])
for trace_ind, trace in enumerate(traces):
trace_uid = trace.get('uid', None)
if trace_uid in cls.instances:
cb = cls.instances[trace_uid]
new_index = point_inds.get(trace_ind, [])
for stream in cb.streams:
stream.event(index=new_index)
class BoundsCallback(PlotlyCallback):
callback_property = "selected_data"
boundsx = False
boundsy = False
@classmethod
def update_streams_from_property_update(cls, selected_data, fig_dict):
traces = fig_dict.get('data', [])
if not selected_data or 'range' not in selected_data:
# No valid box selection
box = None
else:
# Get x and y axis references
box = selected_data["range"]
axis_refs = list(box)
xref = [ref for ref in axis_refs if ref.startswith('x')][0]
yref = [ref for ref in axis_refs if ref.startswith('y')][0]
# Process traces
for trace_ind, trace in enumerate(traces):
trace_type = trace.get('type', 'scatter')
trace_uid = trace.get('uid', None)
if (trace_uid not in cls.instances or
_trace_to_subplot.get(trace_type, None) != ['xaxis', 'yaxis']):
continue
cb = cls.instances[trace_uid]
if (box and trace.get('xaxis', 'x') == xref and
trace.get('yaxis', 'y') == yref):
new_bounds = (box[xref][0], box[yref][0], box[xref][1], box[yref][1])
if cls.boundsx and cls.boundsy:
event_kwargs = dict(bounds=new_bounds)
elif cls.boundsx:
event_kwargs = dict(boundsx=(new_bounds[0], new_bounds[2]))
elif cls.boundsy:
event_kwargs = dict(boundsy=(new_bounds[1], new_bounds[3]))
else:
event_kwargs = dict()
for stream in cb.streams:
stream.event(**event_kwargs)
else:
if cls.boundsx and cls.boundsy:
event_kwargs = dict(bounds=None)
elif cls.boundsx:
event_kwargs = dict(boundsx=None)
elif cls.boundsy:
event_kwargs = dict(boundsy=None)
else:
event_kwargs = dict()
for stream in cb.streams:
stream.event(**event_kwargs)
class BoundsXYCallback(BoundsCallback):
boundsx = True
boundsy = True
class BoundsXCallback(BoundsCallback):
boundsx = True
class BoundsYCallback(BoundsCallback):
boundsy = True
class RangeCallback(PlotlyCallback):
callback_property = "viewport"
x_range = False
y_range = False
@classmethod
def update_streams_from_property_update(cls, viewport, fig_dict):
traces = fig_dict.get('data', [])
# Process traces
for trace_ind, trace in enumerate(traces):
trace_type = trace.get('type', 'scatter')
trace_uid = trace.get('uid', None)
if (trace_uid not in cls.instances or
_trace_to_subplot.get(trace_type, None) != ['xaxis', 'yaxis']):
continue
xaxis = trace.get('xaxis', 'x').replace('x', 'xaxis')
yaxis = trace.get('yaxis', 'y').replace('y', 'yaxis')
xprop = '{xaxis}.range'.format(xaxis=xaxis)
yprop = '{yaxis}.range'.format(yaxis=yaxis)
if not viewport or xprop not in viewport or yprop not in viewport:
x_range = None
y_range = None
else:
x_range = tuple(viewport[xprop])
y_range = tuple(viewport[yprop])
stream_kwargs = {}
if cls.x_range:
stream_kwargs['x_range'] = x_range
if cls.y_range:
stream_kwargs['y_range'] = y_range
cb = cls.instances[trace_uid]
for stream in cb.streams:
stream.event(**stream_kwargs)
class RangeXYCallback(RangeCallback):
x_range = True
y_range = True
class RangeXCallback(RangeCallback):
x_range = True
class RangeYCallback(RangeCallback):
y_range = True
callbacks = Stream._callbacks['plotly']
callbacks[Selection1D] = Selection1DCallback
callbacks[SelectionXY] = BoundsXYCallback
callbacks[BoundsXY] = BoundsXYCallback
callbacks[BoundsX] = BoundsXCallback
callbacks[BoundsY] = BoundsYCallback
callbacks[RangeXY] = RangeXYCallback
callbacks[RangeX] = RangeXCallback
callbacks[RangeY] = RangeYCallback
| 31.334862
| 86
| 0.608549
|
be46144cf6e43548318ab5bb8a4495d5a5fcf30c
| 76
|
py
|
Python
|
pydfu/scanner.py
|
rrajaravi/pydfu
|
8a3847f5c34672ace75ba23b3b01f91e71650fc6
|
[
"MIT"
] | 1
|
2020-02-05T08:38:21.000Z
|
2020-02-05T08:38:21.000Z
|
pydfu/scanner.py
|
rrajaravi/pydfs
|
8a3847f5c34672ace75ba23b3b01f91e71650fc6
|
[
"MIT"
] | null | null | null |
pydfu/scanner.py
|
rrajaravi/pydfs
|
8a3847f5c34672ace75ba23b3b01f91e71650fc6
|
[
"MIT"
] | 2
|
2019-08-23T03:14:37.000Z
|
2020-02-05T08:38:51.000Z
|
class Scanner(object):
def run(self):
print("Running scanner")
| 15.2
| 32
| 0.618421
|
24ccb750fdb871ed8d62e86af97387279b7facf3
| 17,033
|
py
|
Python
|
db/python/layers/family.py
|
jeremiahwander/sample-metadata
|
c494bf4bab3450c721cfb00d377c372d370852cb
|
[
"MIT"
] | null | null | null |
db/python/layers/family.py
|
jeremiahwander/sample-metadata
|
c494bf4bab3450c721cfb00d377c372d370852cb
|
[
"MIT"
] | 40
|
2021-05-05T23:53:36.000Z
|
2022-03-29T23:50:02.000Z
|
db/python/layers/family.py
|
jeremiahwander/sample-metadata
|
c494bf4bab3450c721cfb00d377c372d370852cb
|
[
"MIT"
] | 2
|
2021-12-13T17:51:20.000Z
|
2022-02-23T22:46:57.000Z
|
# pylint: disable=used-before-assignment
from typing import List, Union, Optional
from db.python.connect import Connection
from db.python.layers.base import BaseLayer
from db.python.layers.participant import ParticipantLayer
from db.python.tables.family import FamilyTable
from db.python.tables.family_participant import FamilyParticipantTable
from db.python.tables.participant import ParticipantTable
from db.python.tables.project import ProjectId
class PedRow:
"""Class for capturing a row in a pedigree"""
PedRowKeys = {
# seqr individual template:
# Family ID, Individual ID, Paternal ID, Maternal ID, Sex, Affected, Status, Notes
'family_id': {'familyid', 'family id', 'family', 'family_id'},
'individual_id': {'individualid', 'id', 'individual_id', 'individual id'},
'paternal_id': {'paternal_id', 'paternal id', 'paternalid', 'father'},
'maternal_id': {'maternal_id', 'maternal id', 'maternalid', 'mother'},
'sex': {'sex', 'gender'},
'affected': {'phenotype', 'affected', 'phenotypes', 'affected status'},
'notes': {'notes'},
}
@staticmethod
def default_header():
"""Default header (corresponds to the __init__ keys)"""
return [
'family_id',
'individual_id',
'paternal_id',
'maternal_id',
'sex',
'affected',
'notes',
]
@staticmethod
def row_header():
"""Default RowHeader for output"""
return [
'#Family ID',
'Individual ID',
'Paternal ID',
'Maternal ID',
'Sex',
'Affected',
]
def __init__(
self,
family_id,
individual_id,
paternal_id,
maternal_id,
sex,
affected,
notes=None,
):
self.family_id = family_id
self.individual_id = individual_id
self.paternal_id = None
self.maternal_id = None
if paternal_id is not None and paternal_id not in ('0', 0, ''):
self.paternal_id = paternal_id
if maternal_id is not None and maternal_id not in ('0', 0, ''):
self.maternal_id = maternal_id
self.sex = self.parse_sex(sex)
self.affected = int(affected)
self.notes = notes
@staticmethod
def parse_sex(sex: Union[str, int]):
"""
Parse the pedigree SEX value:
0: unknown
1: male (also accepts 'm')
2: female (also accepts 'f')
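        Illustrative doctest-style examples (added for clarity):
        >>> PedRow.parse_sex('m')
        1
        >>> PedRow.parse_sex('2')
        2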
"""
if isinstance(sex, str) and sex.isdigit():
sex = int(sex)
if isinstance(sex, int):
if 0 <= sex <= 2:
return sex
raise ValueError(f'Sex value ({sex}) was not an expected value [0, 1, 2].')
sl = sex.lower()
if sl == 'm':
return 1
if sl == 'f':
return 2
raise ValueError(f'Unknown sex "{sex}", please ensure sex is in (0, 1, 2)')
def __str__(self):
return f'PedRow: {self.individual_id} ({self.sex})'
@staticmethod
def order(rows: List['PedRow']) -> List['PedRow']:
"""
Order a list of PedRows, but also validates:
- There are no circular dependencies
- All maternal / paternal IDs are found in the pedigree
"""
rows_to_order: List['PedRow'] = [*rows]
ordered = []
seen_individuals = set()
remaining_iterations_in_round = len(rows_to_order)
while len(rows_to_order) > 0:
row = rows_to_order.pop(0)
reqs = [row.paternal_id, row.maternal_id]
if all(r is None or r in seen_individuals for r in reqs):
remaining_iterations_in_round = len(rows_to_order)
ordered.append(row)
seen_individuals.add(row.individual_id)
else:
remaining_iterations_in_round -= 1
rows_to_order.append(row)
# makes more sense to keep this comparison separate:
            # - If remaining iterations is 0 (or less) AND we still have rows
# - Then raise an Exception
# pylint: disable=chained-comparison
if remaining_iterations_in_round <= 0 and len(rows_to_order) > 0:
participant_ids = ', '.join(r.individual_id for r in rows_to_order)
raise Exception(
"There was an issue in the pedigree, either a parent wasn't found in the pedigree, "
"or a circular dependency detected (eg: someone's child is an ancestor's parent). "
f"Can't resolve participants: {participant_ids}"
)
return ordered
@staticmethod
def parse_header_order(header: List[str]):
"""
Takes a list of unformatted headers, and returns a list of ordered init_keys
>>> PedRow.parse_header_order(['family', 'mother', 'paternal id', 'phenotypes', 'gender'])
['family_id', 'maternal_id', 'paternal_id', 'affected', 'sex']
>>> PedRow.parse_header_order(['#family id'])
['family_id']
>>> PedRow.parse_header_order(['unexpected header'])
Traceback (most recent call last):
        ValueError: Unable to identify header elements: "unexpected header"
"""
ordered_init_keys = []
unmatched = []
for item in header:
litem = item.lower().strip().strip('#')
found = False
for h, options in PedRow.PedRowKeys.items():
for potential_key in options:
if potential_key == litem:
ordered_init_keys.append(h)
found = True
break
if found:
break
if not found:
unmatched.append(item)
if unmatched:
unmatched_headers_str = ', '.join(f'"{u}"' for u in unmatched)
raise ValueError(
                'Unable to identify header elements: ' + unmatched_headers_str
)
return ordered_init_keys
class FamilyLayer(BaseLayer):
"""Layer for import logic"""
def __init__(self, connection: Connection):
super().__init__(connection)
self.ftable = FamilyTable(connection)
self.fptable = FamilyParticipantTable(self.connection)
async def get_families(self, project: int = None):
"""Get all families for a project"""
return await self.ftable.get_families(project=project)
async def update_family(
self,
id_: int,
external_id: str = None,
description: str = None,
coded_phenotype: str = None,
check_project_ids: bool = True,
) -> bool:
"""Update fields on some family"""
if check_project_ids:
project_ids = await self.ftable.get_projects_by_family_ids([id_])
await self.ptable.check_access_to_project_ids(
self.author, project_ids, readonly=False
)
return await self.ftable.update_family(
id_=id_,
external_id=external_id,
description=description,
coded_phenotype=coded_phenotype,
)
async def get_pedigree(
self,
project: ProjectId,
family_ids: List[int] = None,
# pylint: disable=invalid-name
replace_with_participant_external_ids=False,
# pylint: disable=invalid-name
replace_with_family_external_ids=False,
empty_participant_value='',
include_header=False,
) -> List[List[Optional[str]]]:
"""
Generate pedigree file for ALL families in project
(unless internal_family_ids is specified).
Use internal IDs unless specific options are specified.
"""
# this is important because a PED file MUST be ordered like this
ordered_keys = [
'family_id',
'participant_id',
'paternal_participant_id',
'maternal_participant_id',
'sex',
'affected',
]
pid_fields = {
'participant_id',
'paternal_participant_id',
'maternal_participant_id',
}
rows = await self.fptable.get_rows(project=project, family_ids=family_ids)
pmap, fmap = {}, {}
if replace_with_participant_external_ids:
participant_ids = set(
s
for r in rows
for s in [r[pfield] for pfield in pid_fields]
if s is not None
)
ptable = ParticipantTable(connection=self.connection)
pmap = await ptable.get_id_map_by_internal_ids(list(participant_ids))
if replace_with_family_external_ids:
family_ids = list(
set(r['family_id'] for r in rows if r['family_id'] is not None)
)
fmap = await self.ftable.get_id_map_by_internal_ids(list(family_ids))
formatted_rows = []
if include_header:
formatted_rows.append(PedRow.row_header())
for row in rows:
formatted_row = []
for field in ordered_keys:
value = row[field]
if field == 'family_id':
formatted_row.append(fmap.get(value, value))
elif field in pid_fields:
formatted_row.append(
pmap.get(value, value) or empty_participant_value
)
else:
formatted_row.append(value)
formatted_rows.append(formatted_row)
return formatted_rows
async def get_participant_family_map(
self, participant_ids: List[int], check_project_ids=False
):
"""Get participant family map"""
fptable = FamilyParticipantTable(self.connection)
projects, family_map = await fptable.get_participant_family_map(
participant_ids=participant_ids
)
if check_project_ids:
raise NotImplementedError(f'Must check specified projects: {projects}')
return family_map
async def import_pedigree(
self,
header: Optional[List[str]],
rows: List[List[str]],
create_missing_participants=False,
):
"""
Import pedigree file
"""
if header is None:
_header = PedRow.default_header()
else:
_header = PedRow.parse_header_order(header)
if len(rows) == 0:
return None
max_row_length = len(rows[0])
if max_row_length > len(_header):
raise ValueError(
f"The parsed header {_header} isn't long enough "
f'to cover row length ({len(_header)} < {len(rows[0])})'
)
if len(_header) > max_row_length:
_header = _header[:max_row_length]
pedrows: List[PedRow] = [
PedRow(**{_header[i]: r[i] for i in range(len(_header))}) for r in rows
]
# this validates a lot of the pedigree too
pedrows = PedRow.order(pedrows)
external_family_ids = set(r.family_id for r in pedrows)
# get set of all individual, paternal, maternal participant ids
external_participant_ids = set(
pid
for r in pedrows
for pid in [r.individual_id, r.paternal_id, r.maternal_id]
if pid
)
participant_table = ParticipantLayer(self.connection)
external_family_id_map = await self.ftable.get_id_map_by_external_ids(
list(external_family_ids),
project=self.connection.project,
allow_missing=True,
)
missing_external_family_ids = [
f for f in external_family_ids if f not in external_family_id_map
]
external_participant_ids_map = await participant_table.get_id_map_by_external_ids(
list(external_participant_ids),
project=self.connection.project,
# Allow missing participants if we're creating them
allow_missing=create_missing_participants,
)
async with self.connection.connection.transaction():
if create_missing_participants:
missing_participant_ids = set(external_participant_ids) - set(
external_participant_ids_map
)
for row in pedrows:
if row.individual_id not in missing_participant_ids:
continue
external_participant_ids_map[
row.individual_id
] = await participant_table.create_participant(
external_id=row.individual_id, reported_sex=row.sex
)
for external_family_id in missing_external_family_ids:
internal_family_id = await self.ftable.create_family(
external_id=external_family_id,
description=None,
coded_phenotype=None,
)
external_family_id_map[external_family_id] = internal_family_id
# now let's map participants back
insertable_rows = [
{
'family_id': external_family_id_map[row.family_id],
'participant_id': external_participant_ids_map[row.individual_id],
'paternal_participant_id': external_participant_ids_map.get(
row.paternal_id
),
'maternal_participant_id': external_participant_ids_map.get(
row.maternal_id
),
'affected': row.affected,
'notes': row.notes,
}
for row in pedrows
]
await participant_table.update_participants(
participant_ids=[
external_participant_ids_map[row.individual_id] for row in pedrows
],
reported_sexes=[row.sex for row in pedrows],
)
await self.fptable.create_rows(insertable_rows)
return True
async def import_families(
self, headers: Optional[List[str]], rows: List[List[str]]
):
"""Import a family table"""
ordered_headers = [
'Family ID',
'Display Name',
'Description',
'Coded Phenotype',
]
_headers = headers or ordered_headers[: len(rows[0])]
lheaders = [k.lower() for k in _headers]
key_map = {
'externalId': {'family_id', 'family id', 'familyid'},
'displayName': {'display name', 'displayname', 'display_name'},
'description': {'description'},
'phenotype': {
'coded phenotype',
'phenotype',
'codedphenotype',
'coded_phenotype',
},
}
def get_idx_for_header(header) -> Optional[int]:
return next(
iter(idx for idx, key in enumerate(lheaders) if key in key_map[header]),
None,
)
external_identifier_idx = get_idx_for_header('externalId')
display_name_idx = get_idx_for_header('displayName')
description_idx = get_idx_for_header('description')
phenotype_idx = get_idx_for_header('phenotype')
# replace empty strings with None
def replace_empty_string_with_none(val):
"""Don't set as empty string, prefer to set as null"""
return None if val == '' else val
rows = [[replace_empty_string_with_none(el) for el in r] for r in rows]
empty = [None] * len(rows)
def select_columns(col1: Optional[int], col2: Optional[int] = None):
"""
            - If col1 and col2 are both None, return [None] * len(rows)
            - If only one of col1 / col2 is not None, return that column
            - Else, return a mixture: prefer col1 per row, falling back to col2
"""
if col1 is None and col2 is None:
# if col1 AND col2 is NONE
return empty
if col1 is not None and col2 is None:
# if only col1 is set
return [r[col1] for r in rows]
if col2 is not None and col1 is None:
# if only col2 is set
return [r[col2] for r in rows]
# if col1 AND col2 are not None
assert col1 is not None and col2 is not None
return [r[col1] if r[col1] is not None else r[col2] for r in rows]
await self.ftable.insert_or_update_multiple_families(
external_ids=select_columns(external_identifier_idx, display_name_idx),
descriptions=select_columns(description_idx),
coded_phenotypes=select_columns(phenotype_idx),
)
return True
| 35.708595
| 104
| 0.567369
|
c92fa19098f3ba8a22a93e1551251c053740a11b
| 1,990
|
py
|
Python
|
tests/raw/test_instance_usable.py
|
johny-b/blargh
|
45bb94cad8c70b0cd5b0b4f1330682107051fb9d
|
[
"MIT"
] | null | null | null |
tests/raw/test_instance_usable.py
|
johny-b/blargh
|
45bb94cad8c70b0cd5b0b4f1330682107051fb9d
|
[
"MIT"
] | 3
|
2019-07-09T08:01:36.000Z
|
2020-07-08T10:18:52.000Z
|
tests/raw/test_instance_usable.py
|
johny-b/blargh
|
45bb94cad8c70b0cd5b0b4f1330682107051fb9d
|
[
"MIT"
] | null | null | null |
'''
Written/deleted instances should no longer be usable.
This is a safeguard against misusing the Instance class.
Single world().write() writes all instances, so even
those not changed should become unusable.
'''
from blargh.engine import world
from blargh import exceptions
from ..helpers.common import related
from example import family
import pytest
def test_usable_1(init_world):
init_world(family.dm)
world().begin()
child = world().get_instance('child', 1)
father = related(child, 'father')
mother = related(child, 'mother')
# this one is not affected by change, but should still be unusable,
# because world writes all current instances
other_child = world().get_instance('child', 2)
child.delete()
world().write()
assert not child.usable
assert not father.usable
assert not mother.usable
assert not other_child.usable
def test_usable_2(init_world):
init_world(family.dm)
world().begin()
child = world().get_instance('child', 1)
other_child = world().get_instance('child', 2)
world().write()
assert not child.usable
assert not other_child.usable
def test_usable_3(init_world):
init_world(family.dm)
world().begin()
child = world().new_instance('child')
# fresh instance is fine ...
assert child.usable
# ... until written
world().write()
assert not child.usable
def test_usable_4(init_world):
    '''attempting to update an unusable instance should raise an exception'''
init_world(family.dm)
world().begin()
child = world().get_instance('child', 1)
world().write()
with pytest.raises(exceptions.ProgrammingError):
child.update(dict(name='aaa'))
def test_usable_5(init_world):
    '''attempting to delete an unusable instance should raise an exception'''
init_world(family.dm)
world().begin()
child = world().get_instance('child', 1)
world().write()
with pytest.raises(Exception):
child.delete()
| 25.512821
| 73
| 0.686432
|
29161fb3c4c2ffc96106d360205ac4c60fb3b70b
| 1,146
|
py
|
Python
|
mysite/mathswizard/migrations/0002_auto_20171121_1551.py
|
JayH117/Final_Year_Project
|
73b31480738f1e957ee6e8f8be28df003732988a
|
[
"MIT"
] | null | null | null |
mysite/mathswizard/migrations/0002_auto_20171121_1551.py
|
JayH117/Final_Year_Project
|
73b31480738f1e957ee6e8f8be28df003732988a
|
[
"MIT"
] | null | null | null |
mysite/mathswizard/migrations/0002_auto_20171121_1551.py
|
JayH117/Final_Year_Project
|
73b31480738f1e957ee6e8f8be28df003732988a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-21 15:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mathswizard', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='StudentProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('teacher', models.CharField(default='', max_length=100)),
('city', models.CharField(default='', max_length=100)),
('school', models.CharField(default='', max_length=100)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.RenameModel(
old_name='UserProfile',
new_name='TeacherProfile',
),
]
| 34.727273
| 122
| 0.604712
|
20fa748c452b56337e4b58b2ad34ade65622bf34
| 4,400
|
py
|
Python
|
test/functional/interface_bitcoin_cli.py
|
CampoDiFiori/bitcoinvault
|
9cba2b54d7564651192bae4f25b511f9bf97bfb0
|
[
"MIT"
] | 23
|
2020-03-09T13:08:45.000Z
|
2021-04-22T09:39:25.000Z
|
test/functional/interface_bitcoin_cli.py
|
CampoDiFiori/bitcoinvault
|
9cba2b54d7564651192bae4f25b511f9bf97bfb0
|
[
"MIT"
] | 7
|
2020-05-07T02:05:40.000Z
|
2020-08-07T10:15:21.000Z
|
test/functional/interface_bitcoin_cli.py
|
CampoDiFiori/bitcoinvault
|
9cba2b54d7564651192bae4f25b511f9bf97bfb0
|
[
"MIT"
] | 24
|
2020-04-17T18:19:56.000Z
|
2022-01-25T10:39:30.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bvault-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
cli_response = self.nodes[0].cli("-version").send_cli()
assert("Bitcoin Vault RPC client version" in cli_response)
self.log.info("Compare responses from getwalletinfo RPC and `bvault-cli getwalletinfo`")
if self.is_wallet_compiled():
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `bvault-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
self.log.info("Test connecting with non-existing RPC cookie file")
assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `bvault-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
if self.is_wallet_compiled():
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
if self.is_wallet_compiled():
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
if self.is_wallet_compiled():
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
| 55
| 160
| 0.695
|
89cc2db45a08fb2789c44b4dbdfd615b6dca7423
| 5,488
|
py
|
Python
|
classify.py
|
graviraja/pytorch-sample-codes
|
f5246c0d31a30302a5c9623f4354c8c2e0330d48
|
[
"MIT"
] | 22
|
2019-07-24T03:15:48.000Z
|
2022-03-29T02:50:19.000Z
|
classify.py
|
graviraja/pytorch-sample-codes
|
f5246c0d31a30302a5c9623f4354c8c2e0330d48
|
[
"MIT"
] | 6
|
2019-07-07T08:55:47.000Z
|
2021-03-16T04:10:30.000Z
|
classify.py
|
graviraja/pytorch-sample-codes
|
f5246c0d31a30302a5c9623f4354c8c2e0330d48
|
[
"MIT"
] | 11
|
2019-12-05T09:09:23.000Z
|
2022-01-16T10:53:45.000Z
|
""" Char level RNN to classify words.
We try to classify surnames from 18 languages.
"""
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string
import torch
import torch.nn as nn
import random
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
def findFiles(path):
return glob.glob(path)
def unicodeToAscii(s):
# convert the unicode string to plain ascii
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn' and c in all_letters
)
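# Illustrative check (the example input is an assumption, not from the original file):
# unicodeToAscii('Ślusàrski') -> 'Slusarski' -- combining marks are dropped and only
# characters present in all_letters are kept.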
category_lines = {}
all_categories = []
def readLines(filename):
# read a file and split into lines.
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
for filename in findFiles('data/names/*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
def letterToIndex(letter):
# find the index of given letter from all the letters.
return all_letters.find(letter)
def letterToTensor(letter):
# convert the letter into a tensor of shape (1, n_letters)
# shape is (1, n_letters) instead of (n_letters) because 1 is batch_size
# pytorch expects everything in batches
tensor = torch.zeros(1, n_letters)
tensor[0][letterToIndex(letter)] = 1
return tensor
def lineToTensor(line):
# convert the line into a tensor of shape (line_length, 1, n_letters)
# 1 in shape is batch size
tensor = torch.zeros(len(line), 1, n_letters)
for index, letter in enumerate(line):
tensor[index][0][letterToIndex(letter)] = 1
return tensor
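# Quick shape sanity check (illustrative): lineToTensor('Jones') has shape
# (5, 1, n_letters) -- one one-hot row per character, with a batch dimension of 1.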
def categoryFromOutput(output):
# return the category and category id from the output tensor
top_n, top_i = output.topk(1)
category_i = top_i[0].item()
return all_categories[category_i], category_i
def randomChoice(l):
# random pick a value from the list l
return l[random.randint(0, len(l) - 1)]
def randomTrainingExamples():
# randomly pick a category and randomly pick a line from that category
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)
line_tensor = lineToTensor(line)
return category, line, category_tensor, line_tensor
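# Example of one sampled pair (values are illustrative): category='Italian', line='Rossi',
# category_tensor=tensor([i]) where i is the index of 'Italian' in all_categories, and
# line_tensor of shape (5, 1, n_letters).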
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, x, hidden):
combined = torch.cat((x, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
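# Shape summary for the class above (descriptive only): forward() takes an input of shape
# (1, input_size) and a hidden state of shape (1, hidden_size), and returns log-probabilities
# of shape (1, output_size) together with the next hidden state of shape (1, hidden_size).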
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
criterion = nn.NLLLoss()
optimizer = torch.optim.SGD(rnn.parameters(), lr=0.005)
def train(category_tensor, line_tensor):
hidden = rnn.initHidden()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
optimizer.zero_grad()
loss = criterion(output, category_tensor)
loss.backward()
optimizer.step()
return output, loss.item()
def evaluate(line_tensor):
with torch.no_grad():
hidden = rnn.initHidden()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
return output
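# --- Optional inference helper --------------------------------------------------------
# A minimal sketch (not part of the original script) showing how evaluate() and
# all_categories can be combined to guess the most likely languages for one surname.
# The function name `predict`, the default of 3 predictions and the example call in the
# trailing comment are assumptions added for illustration.
def predict(input_line, n_predictions=3):
    # score the name once, then take the top-k log-probabilities and map them to languages
    output = evaluate(lineToTensor(input_line))
    topv, topi = output.topk(n_predictions, 1, True)
    predictions = []
    for i in range(n_predictions):
        value = topv[0][i].item()
        category_index = topi[0][i].item()
        predictions.append((value, all_categories[category_index]))
    return predictions
# e.g. predict('Dovesky') returns a list like [(-0.4, 'Russian'), ...] (values illustrative)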
avg_loss = 0
all_losses = []
for epoch in range(100000):
category, line, category_tensor, line_tensor = randomTrainingExamples()
output, loss = train(category_tensor, line_tensor)
avg_loss += loss
if (epoch + 1) % 5000 == 0:
guess, guess_i = categoryFromOutput(output)
correct = '✓' if guess == category else '✗ (%s)' % category
print(f"epoch : {epoch}, loss : {avg_loss / epoch}, {line} / {guess} {correct} ({category})")
all_losses.append(avg_loss)
avg_loss = 0
torch.save(rnn, 'char-rnn-classification.pt')
rnn = torch.load('char-rnn-classification.pt')
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000
# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
category, line, category_tensor, line_tensor = randomTrainingExamples()
output = evaluate(line_tensor)
guess, guess_i = categoryFromOutput(output)
category_i = all_categories.index(category)
confusion[category_i][guess_i] += 1
# Normalize by dividing every row by its sum
for i in range(n_categories):
confusion[i] = confusion[i] / confusion[i].sum()
# Set up plot
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)
# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# sphinx_gallery_thumbnail_number = 2
plt.show()
| 29.505376
| 101
| 0.700437
|
728d4c8046a3ec0d3538d46549285e08b589de2e
| 2,533
|
py
|
Python
|
dateparser/data/date_translation_data/az-Cyrl.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 1,804
|
2015-01-01T23:01:54.000Z
|
2022-03-30T18:36:16.000Z
|
dateparser/data/date_translation_data/az-Cyrl.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 948
|
2015-01-04T22:18:39.000Z
|
2022-03-31T16:29:41.000Z
|
dateparser/data/date_translation_data/az-Cyrl.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 463
|
2015-01-10T08:53:39.000Z
|
2022-03-18T12:45:49.000Z
|
info = {
"name": "az-Cyrl",
"date_order": "DMY",
"january": [
"јан",
"јанвар"
],
"february": [
"фев",
"феврал"
],
"march": [
"мар",
"март"
],
"april": [
"апр",
"апрел"
],
"may": [
"май"
],
"june": [
"ијн",
"ијун"
],
"july": [
"ијл",
"ијул"
],
"august": [
"авг",
"август"
],
"september": [
"сен",
"сентјабр"
],
"october": [
"окт",
"октјабр"
],
"november": [
"ној",
"нојабр"
],
"december": [
"дек",
"декабр"
],
"monday": [
"базар ертәси",
"бе"
],
"tuesday": [
"ча",
"чәршәнбә ахшамы"
],
"wednesday": [
"ч",
"чәршәнбә"
],
"thursday": [
"ҹа",
"ҹүмә ахшамы"
],
"friday": [
"ҹ",
"ҹүмә"
],
"saturday": [
"ш",
"шәнбә"
],
"sunday": [
"б",
"базар"
],
"am": [
"ам"
],
"pm": [
"пм"
],
"year": [
"year"
],
"month": [
"month"
],
"week": [
"week"
],
"day": [
"day"
],
"hour": [
"hour"
],
"minute": [
"minute"
],
"second": [
"second"
],
"relative-type": {
"0 day ago": [
"today"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"this month"
],
"0 second ago": [
"now"
],
"0 week ago": [
"this week"
],
"0 year ago": [
"this year"
],
"1 day ago": [
"yesterday"
],
"1 month ago": [
"last month"
],
"1 week ago": [
"last week"
],
"1 year ago": [
"last year"
],
"in 1 day": [
"tomorrow"
],
"in 1 month": [
"next month"
],
"in 1 week": [
"next week"
],
"in 1 year": [
"next year"
]
},
"locale_specific": {},
"skip": [
" ",
"'",
",",
"-",
".",
"/",
";",
"@",
"[",
"]",
"|",
","
]
}
| 14.988166
| 26
| 0.259376
|
082cba67f7507881569b97db419e839a1f675f7c
| 52,721
|
bzl
|
Python
|
tensorflow/workspace.bzl
|
lezh/tensorflow
|
7a4e9467cb1a6604a43dc0ebc3e77322511643b7
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/workspace.bzl
|
lezh/tensorflow
|
7a4e9467cb1a6604a43dc0ebc3e77322511643b7
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/workspace.bzl
|
lezh/tensorflow
|
7a4e9467cb1a6604a43dc0ebc3e77322511643b7
|
[
"Apache-2.0"
] | null | null | null |
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/mkl:build_defs.bzl", "mkl_repository")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load(
"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
"def_file_filter_configure",
)
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/FXdiv:workspace.bzl", FXdiv = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/clog:workspace.bzl", clog = "repo")
load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo")
load("//third_party/dlpack:workspace.bzl", dlpack = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
load("//third_party/psimd:workspace.bzl", psimd = "repo")
load("//third_party/pthreadpool:workspace.bzl", pthreadpool = "repo")
load("//third_party/ruy:workspace.bzl", ruy = "repo")
load("//third_party/sobol_data:workspace.bzl", sobol_data = "repo")
load("//third_party/vulkan_headers:workspace.bzl", vulkan_headers = "repo")
load("//third_party/toolchains/remote_config:configs.bzl", "initialize_rbe_configs")
def initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
FXdiv()
aws()
clog()
cpuinfo()
dlpack()
flatbuffers()
hexagon_nn()
highwayhash()
hwloc()
icu()
kissfft()
jpeg()
nasm()
opencl_headers()
pasta()
psimd()
pthreadpool()
sobol_data()
vulkan_headers()
ruy()
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
return str(Label(dep))
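# Illustrative example (the workspace name is an assumption): when TensorFlow is included
# as the external repository "@org_tensorflow", clean_dep("//third_party:snappy.BUILD")
# resolves to "@org_tensorflow//third_party:snappy.BUILD", so labels in this file keep
# working when it is load()ed from a parent workspace.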
# If TensorFlow is linked as a submodule.
# path_prefix is no longer used.
# tf_repo_name is thought to be under consideration.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
tf_repositories(path_prefix, tf_repo_name)
tf_bind()
# Define all external repositories required by TensorFlow
def tf_repositories(path_prefix = "", tf_repo_name = ""):
"""All external dependencies for TF builds."""
# Loads all external repos to configure RBE builds.
initialize_rbe_configs()
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
sycl_configure(name = "local_config_sycl")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
remote_execution_configure(name = "local_config_remote_execution")
initialize_third_party()
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
remote_config_repo = "../arm_compiler",
)
mkl_repository(
name = "mkl_linux",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "a936d6b277a33d2a027a024ea8e65df62bd2e162c7ca52c48486ed9d5dc27160",
strip_prefix = "mklml_lnx_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_lnx_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_lnx_2019.0.5.20190502.tgz",
],
)
mkl_repository(
name = "mkl_windows",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "33cc27652df3b71d7cb84b26718b5a2e8965e2c864a502347db02746d0430d57",
strip_prefix = "mklml_win_2020.0.20190813",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_win_2020.0.20190813.zip",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_win_2020.0.20190813.zip",
],
)
mkl_repository(
name = "mkl_darwin",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "2fbb71a0365d42a39ea7906568d69b1db3bfc9914fee75eedb06c5f32bf5fa68",
strip_prefix = "mklml_mac_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_mac_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_mac_2019.0.5.20190502.tgz",
],
)
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used " +
"and will be removed in the future.")
tf_http_archive(
name = "XNNPACK",
sha256 = "2afaaf5f866ec714358985b123c3115043b9e099638100937743997f02bbd8cb",
strip_prefix = "XNNPACK-05702cf4099ad019ad1abb8ba656bfe04304f32a",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/05702cf4099ad019ad1abb8ba656bfe04304f32a.zip",
"https://github.com/google/XNNPACK/archive/05702cf4099ad019ad1abb8ba656bfe04304f32a.zip",
],
)
# Important: If you are upgrading MKL-DNN, then update the version numbers
# in third_party/mkl_dnn/mkldnn.BUILD. In addition, the new version of
# MKL-DNN might require upgrading MKL ML libraries also. If they need to be
# upgraded then update the version numbers on all three versions above
# (Linux, Mac, Windows).
tf_http_archive(
name = "mkl_dnn",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "31e78581e59d7e60d4becaba3834fc6a5bf2dccdae3e16b7f70d89ceab38423f",
strip_prefix = "mkl-dnn-0.21.3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v0.21.3.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v0.21.3.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn_v1.BUILD"),
sha256 = "a71ec1f27c30b8a176605e8a78444f1f12301a3c313b70ff93290926c140509c",
strip_prefix = "mkl-dnn-1.2.2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v1.2.2.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v1.2.2.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
# TODO: Remove the patch when https://github.com/abseil/abseil-cpp/issues/326 is resolved
# and when TensorFlow is build against CUDA 10.2
patch_file = clean_dep("//third_party:com_google_absl_fix_mac_and_nvcc_build.patch"),
sha256 = "f368a8476f4e2e0eccf8a7318b98dafbe30b2600f4e3cf52636e5eb145aba06a", # SHARED_ABSL_SHA
strip_prefix = "abseil-cpp-df3ea785d8c30a9503321a3d35ee7d35808f190d",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
],
)
tf_http_archive(
name = "eigen_archive",
build_file = clean_dep("//third_party:eigen.BUILD"),
patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
sha256 = "62d1581a740caa74f1bf9db8552abebcd772bf12be035e9422bd59bfb0a2ba8e", # SHARED_EIGEN_SHA
strip_prefix = "eigen-deb93ed1bf359ac99923e3a2b90a2920b1101290",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/deb93ed1bf359ac99923e3a2b90a2920b1101290/eigen-deb93ed1bf359ac99923e3a2b90a2920b1101290.tar.gz",
"https://gitlab.com/libeigen/eigen/-/archive/deb93ed1bf359ac99923e3a2b90a2920b1101290/eigen-deb93ed1bf359ac99923e3a2b90a2920b1101290.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = clean_dep("//:arm_compiler.BUILD"),
sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f",
strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
"https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),
sha256 = "9c0af4509ea341d1ee2c6c19fc6f19289318c3bd4b17844efeb9e7f9691abf76",
strip_prefix = "libxsmm-1.14",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.14.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.14.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
"https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "d67fed328d82aa404c3ab8f52814914f419a673573e3bbd98b4e6c405ca3cd06",
strip_prefix = "google-cloud-cpp-0.17.0",
system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"),
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v0.17.0.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v0.17.0.tar.gz",
],
)
tf_http_archive(
name = "com_google_googleapis",
build_file = clean_dep("//third_party/googleapis:googleapis.BUILD"),
sha256 = "7ebab01b06c555f4b6514453dc3e1667f810ef91d1d4d2d3aa29bb9fcb40a900",
strip_prefix = "googleapis-541b1ded4abadcc38e8178680b0677f65594ea6f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
"https://github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
],
)
tf_http_archive(
name = "gemmlowp",
sha256 = "6678b484d929f2d0d3229d8ac4e3b815a950c86bb9f17851471d143f6d4f7834", # SHARED_GEMMLOWP_SHA
strip_prefix = "gemmlowp-12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip",
"https://github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip",
],
)
tf_http_archive(
name = "farmhash_archive",
build_file = clean_dep("//third_party:farmhash.BUILD"),
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0", # SHARED_FARMHASH_SHA
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
"https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
],
)
tf_http_archive(
name = "png",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = clean_dep("//third_party:sqlite.BUILD"),
sha256 = "f3c79bc9f4162d0b06fa9fe09ee6ccd23bb99ce310b792c5145f87fbcc30efca",
strip_prefix = "sqlite-amalgamation-3310100",
system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3310100.zip",
"https://www.sqlite.org/2020/sqlite-amalgamation-3310100.zip",
],
)
tf_http_archive(
name = "gif",
build_file = clean_dep("//third_party:gif.BUILD"),
patch_file = clean_dep("//third_party:gif_fix_strtok_r.patch"),
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
"https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = clean_dep("//third_party:six.BUILD"),
sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73",
strip_prefix = "six-1.12.0",
system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.12.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.12.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = clean_dep("//third_party:astor.BUILD"),
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "astunparse_archive",
build_file = clean_dep("//third_party:astunparse.BUILD"),
sha256 = "5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872",
strip_prefix = "astunparse-1.6.3/lib",
system_build_file = clean_dep("//third_party/systemlibs:astunparse.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
"https://files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
],
)
filegroup_external(
name = "astunparse_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"92fc0e4f4fa9460558eedf3412b988d433a2dcbb3a9c45402a145a4fab8a6ac6": [
"https://storage.googleapis.com/mirror.tensorflow.org/raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
"https://raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
],
},
)
tf_http_archive(
name = "functools32_archive",
build_file = clean_dep("//third_party:functools32.BUILD"),
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = clean_dep("//third_party/systemlibs:functools32.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
"https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = clean_dep("//third_party:gast.BUILD"),
sha256 = "b881ef288a49aa81440d2c5eb8aeefd4c2bb8993d5f50edae7413a85bfdb3b57",
strip_prefix = "gast-0.3.3",
system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.3.3.tar.gz",
"https://files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.3.3.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = clean_dep("//third_party:termcolor.BUILD"),
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = clean_dep("//third_party:opt_einsum.BUILD"),
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = clean_dep("//third_party/systemlibs:opt_einsum.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
"https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "603febc9b95a8f2979a7bdb77d2f5e4d9b30d4e0d59579f88eba67d4e4cc5462",
strip_prefix = "abseil-py-pypi-v0.9.0",
system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"),
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = clean_dep("//third_party:enum34.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:enum34.BUILD"),
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = clean_dep("//third_party:backports_weakref.BUILD"),
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
tf_http_archive(
name = "com_google_protobuf",
patch_file = clean_dep("//third_party/protobuf:protobuf.patch"),
sha256 = "cfcba2df10feec52a84208693937c17a4b5df7775e1635c1e3baffc487b24c9b",
strip_prefix = "protobuf-3.9.2",
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
"https://github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
],
)
tf_http_archive(
name = "nsync",
sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4",
strip_prefix = "nsync-1.22.0",
system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.22.0.tar.gz",
"https://github.com/google/nsync/archive/1.22.0.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = clean_dep("//third_party:pcre.BUILD"),
sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5",
strip_prefix = "pcre-8.42",
system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
"https://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
],
)
tf_http_archive(
name = "swig",
build_file = clean_dep("//third_party:swig.BUILD"),
sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
strip_prefix = "swig-3.0.8",
system_build_file = clean_dep("//third_party/systemlibs:swig.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"https://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"https://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = clean_dep("//third_party:curl.BUILD"),
sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98",
strip_prefix = "curl-7.69.1",
system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.69.1.tar.gz",
"https://curl.haxx.se/download/curl-7.69.1.tar.gz",
],
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "com_github_grpc_grpc",
sha256 = "b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f",
strip_prefix = "grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd",
system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
"https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
],
)
tf_http_archive(
name = "com_github_nanopb_nanopb",
sha256 = "18234d9f01b57248472a9bfa65c3379352b5d66c15b0ef1c2b4feece4b5670fe",
build_file = "@com_github_grpc_grpc//third_party:nanopb.BUILD",
strip_prefix = "nanopb-0.4.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nanopb/nanopb/archive/0.4.1.tar.gz",
"https://github.com/nanopb/nanopb/archive/0.4.1.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = clean_dep("//third_party:linenoise.BUILD"),
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
# Check out LLVM and MLIR from llvm-project.
LLVM_COMMIT = "c8de17bca658e62bbf8c33eae839e457332e885e"
LLVM_SHA256 = "a1a4b06037c7b19a5f9414fee9626252e4de3e9d9461c8095cc569ee25d647a3"
LLVM_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
]
tf_http_archive(
name = "llvm-project",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = LLVM_URLS,
additional_build_files = {
clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"): "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
tf_http_archive(
name = "lmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
sha256 = "77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0",
strip_prefix = "jsoncpp-1.9.2",
system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "a9c3b03657d507975a32732f04563132b4553c20747cec6dc04de475c8bdf29f",
strip_prefix = "boringssl-80ca9f9f6ece29ab132cce4cf807a9465a18cfac",
system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
"https://github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
],
)
tf_http_archive(
name = "zlib",
build_file = clean_dep("//third_party:zlib.BUILD"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
sha256 = "ada7e99087c4ed477bfdf11413f2ba8db8a840ba9bbf8ac94f4f3972e2a7cec9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz",
"https://www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz",
],
)
tf_http_archive(
name = "snappy",
build_file = clean_dep("//third_party:snappy.BUILD"),
sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
strip_prefix = "snappy-1.1.7",
system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.7.tar.gz",
"https://github.com/google/snappy/archive/1.1.7.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = clean_dep("//third_party:nccl/archive.BUILD"),
patch_file = clean_dep("//third_party/nccl:archive.patch"),
sha256 = "7ff66aca18392b162829612e02c00b123a58ec35869334f72d7e5afaf5ea4a13",
strip_prefix = "nccl-3701130b3c1bcdb01c14b3cb70fe52498c1e82b7",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/3701130b3c1bcdb01c14b3cb70fe52498c1e82b7.tar.gz",
"https://github.com/nvidia/nccl/archive/3701130b3c1bcdb01c14b3cb70fe52498c1e82b7.tar.gz",
],
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"https://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"https://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
"https://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"https://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = clean_dep("//third_party:pprof.BUILD"),
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
"https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
],
)
tf_http_archive(
name = "cub_archive",
build_file = clean_dep("//third_party:cub.BUILD"),
patch_file = clean_dep("//third_party:cub.pr170.patch"),
sha256 = "6bfa06ab52a650ae7ee6963143a0bbc667d6504822cbd9670369b598f18c58c3",
strip_prefix = "cub-1.8.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.8.0.zip",
"https://github.com/NVlabs/cub/archive/1.8.0.zip",
],
)
tf_http_archive(
name = "cython",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
sha256 = "bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa",
strip_prefix = "cython-0.28.4",
system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.28.4.tar.gz",
"https://github.com/cython/cython/archive/0.28.4.tar.gz",
],
)
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
],
)
tf_http_archive(
name = "double_conversion",
build_file = clean_dep("//third_party:double_conversion.BUILD"),
sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
"https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"),
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"),
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
strip_prefix = "ovic",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
],
)
tf_http_archive(
name = "rules_cc",
sha256 = "cf3b76a90c86c0554c5b10f4b160f05af71d252026b71362c4674e2fb9936cf9",
strip_prefix = "rules_cc-01d4a48911d5e7591ecb1c06d3b8af47fe872371",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
"https://github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
],
)
tf_http_archive(
name = "rules_python",
sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
"https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
],
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
],
)
# Apple and Swift rules.
# https://github.com/bazelbuild/rules_apple/releases
tf_http_archive(
name = "build_bazel_rules_apple",
sha256 = "a045a436b642c70fb0c10ca84ff0fd2dcbd59cc89100d597a61e8374afafb366",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_apple/releases/download/0.18.0/rules_apple.0.18.0.tar.gz",
"https://github.com/bazelbuild/rules_apple/releases/download/0.18.0/rules_apple.0.18.0.tar.gz",
],
)
# https://github.com/bazelbuild/rules_swift/releases
tf_http_archive(
name = "build_bazel_rules_swift",
sha256 = "18cd4df4e410b0439a4935f9ca035bd979993d42372ba79e7f2d4fafe9596ef0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_swift/releases/download/0.12.1/rules_swift.0.12.1.tar.gz",
"https://github.com/bazelbuild/rules_swift/releases/download/0.12.1/rules_swift.0.12.1.tar.gz",
],
)
# https://github.com/bazelbuild/apple_support/releases
tf_http_archive(
name = "build_bazel_apple_support",
sha256 = "122ebf7fe7d1c8e938af6aeaee0efe788a3a2449ece5a8d6a428cb18d6f88033",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/apple_support/releases/download/0.7.1/apple_support.0.7.1.tar.gz",
"https://github.com/bazelbuild/apple_support/releases/download/0.7.1/apple_support.0.7.1.tar.gz",
],
)
# https://github.com/bazelbuild/bazel-skylib/releases
tf_http_archive(
name = "bazel_skylib",
sha256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz",
],
)
# https://github.com/apple/swift-protobuf/releases
tf_http_archive(
name = "com_github_apple_swift_swift_protobuf",
strip_prefix = "swift-protobuf-1.6.0/",
sha256 = "4ccf6e5ea558e8287bf6331f9f6e52b3c321fca5f1d181d03680f415c32a6bba",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/apple/swift-protobuf/archive/1.6.0.zip",
"https://github.com/apple/swift-protobuf/archive/1.6.0.zip",
],
)
# https://github.com/google/xctestrunner/releases
http_file(
name = "xctestrunner",
executable = 1,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
"https://github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
],
)
tf_http_archive(
name = "tbb",
build_file = clean_dep("//third_party/ngraph:tbb.BUILD"),
sha256 = "c3245012296f09f1418b78a8c2f17df5188b3bd0db620f7fd5fabe363320805a",
strip_prefix = "tbb-2019_U1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/01org/tbb/archive/2019_U1.zip",
"https://github.com/01org/tbb/archive/2019_U1.zip",
],
)
tf_http_archive(
name = "ngraph",
build_file = clean_dep("//third_party/ngraph:ngraph.BUILD"),
sha256 = "a1780f24a1381fc25e323b4b2d08b6ef5129f42e011305b2a34dcf43a48030d5",
strip_prefix = "ngraph-0.11.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
"https://github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
],
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = clean_dep("//third_party/ngraph:nlohmann_json.BUILD"),
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
"https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
],
)
tf_http_archive(
name = "ngraph_tf",
build_file = clean_dep("//third_party/ngraph:ngraph_tf.BUILD"),
sha256 = "742a642d2c6622277df4c902b6830d616d0539cc8cd843d6cdb899bb99e66e36",
strip_prefix = "ngraph-tf-0.9.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
"https://github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
],
)
tf_http_archive(
name = "pybind11",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.4.3.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz",
],
sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
strip_prefix = "pybind11-2.4.3",
build_file = clean_dep("//third_party:pybind11.BUILD"),
)
tf_http_archive(
name = "wrapt",
build_file = clean_dep("//third_party:wrapt.BUILD"),
sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
strip_prefix = "wrapt-1.11.1/src/wrapt",
system_build_file = clean_dep("//third_party/systemlibs:wrapt.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
"https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
],
)
tf_http_archive(
name = "coremltools",
sha256 = "0d594a714e8a5fd5bd740ad112ef59155c0482e25fdc8f8efa5758f90abdcf1e",
strip_prefix = "coremltools-3.3",
build_file = clean_dep("//third_party:coremltools.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/apple/coremltools/archive/3.3.zip",
"https://github.com/apple/coremltools/archive/3.3.zip",
],
)
def tf_bind():
"""Bind targets for some external repositories"""
##############################################################################
# BIND DEFINITIONS
#
# Please do not add bind() definitions unless we have no other choice.
# If that ends up being the case, please leave a comment explaining
# why we can't depend on the canonical build target.
# Needed by Protobuf
native.bind(
name = "grpc_cpp_plugin",
actual = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin",
)
native.bind(
name = "grpc_python_plugin",
actual = "@com_github_grpc_grpc//src/compiler:grpc_python_plugin",
)
native.bind(
name = "grpc_lib",
actual = "@com_github_grpc_grpc//:grpc++",
)
native.bind(
name = "grpc_lib_unsecure",
actual = "@com_github_grpc_grpc//:grpc++_unsecure",
)
# Needed by Protobuf
native.bind(
name = "python_headers",
actual = clean_dep("//third_party/python_runtime:headers"),
)
# Needed by Protobuf
native.bind(
name = "six",
actual = "@six_archive//:six",
)
| 47.88465
| 203
| 0.684357
|
37a312574c011b4dd333ec54c5a8002e0fa61794
| 4,929
|
py
|
Python
|
components/google-cloud/google_cloud_pipeline_components/experimental/remote/gcp_launcher/custom_job_remote_runner.py
|
hanwgyu/pipelines
|
9f9830fef25be0fa88786d472933b914b55874b5
|
[
"Apache-2.0"
] | null | null | null |
components/google-cloud/google_cloud_pipeline_components/experimental/remote/gcp_launcher/custom_job_remote_runner.py
|
hanwgyu/pipelines
|
9f9830fef25be0fa88786d472933b914b55874b5
|
[
"Apache-2.0"
] | null | null | null |
components/google-cloud/google_cloud_pipeline_components/experimental/remote/gcp_launcher/custom_job_remote_runner.py
|
hanwgyu/pipelines
|
9f9830fef25be0fa88786d472933b914b55874b5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import time
from os import path
from google.api_core import gapic_v1
from google.cloud import aiplatform
from google.cloud.aiplatform.compat.types import job_state as gca_job_state
_POLLING_INTERVAL_IN_SECONDS = 20
_CONNECTION_ERROR_RETRY_LIMIT = 5
_JOB_COMPLETE_STATES = (
gca_job_state.JobState.JOB_STATE_SUCCEEDED,
gca_job_state.JobState.JOB_STATE_FAILED,
gca_job_state.JobState.JOB_STATE_CANCELLED,
gca_job_state.JobState.JOB_STATE_PAUSED,
)
_JOB_ERROR_STATES = (
gca_job_state.JobState.JOB_STATE_FAILED,
gca_job_state.JobState.JOB_STATE_CANCELLED,
gca_job_state.JobState.JOB_STATE_PAUSED,
)
def create_custom_job(
type,
project,
location,
payload,
gcp_resources,
):
"""
  Create the custom job and poll its status until it reaches a final state.
  This follows the typical launching logic:
  1. Check whether the custom job already exists in gcp_resources.
     - If it already exists, jump to step 3 and poll the job status. This happens
       when the launcher container experienced unexpected termination, such as
       preemption.
  2. Deserialize the payload into the job spec and create the custom job.
  3. Poll the custom job status every _POLLING_INTERVAL_IN_SECONDS seconds.
     - If the custom job succeeded, return success.
     - If the custom job was cancelled or paused, that is unexpected, so fail.
     - If the custom job is still running, continue polling.
  Also retry on ConnectionError up to _CONNECTION_ERROR_RETRY_LIMIT times during the poll.
"""
client_options = {"api_endpoint": location + '-aiplatform.googleapis.com'}
client_info = gapic_v1.client_info.ClientInfo(
user_agent="google-cloud-pipeline-components",
)
# Initialize client that will be used to create and send requests.
job_client = aiplatform.gapic.JobServiceClient(
client_options=client_options, client_info=client_info
)
# Check if the Custom job already exists
if path.exists(gcp_resources) and os.stat(gcp_resources).st_size != 0:
with open(gcp_resources) as f:
custom_job_name = f.read()
logging.info(
'CustomJob name already exists: %s. Continue polling the status',
custom_job_name
)
else:
parent = f"projects/{project}/locations/{location}"
job_spec = json.loads(payload, strict=False)
create_custom_job_response = job_client.create_custom_job(
parent=parent, custom_job=job_spec
)
custom_job_name = create_custom_job_response.name
# Write the job id to output
with open(gcp_resources, 'w') as f:
f.write(custom_job_name)
# Poll the job status
get_custom_job_response = job_client.get_custom_job(name=custom_job_name)
retry_count = 0
while get_custom_job_response.state not in _JOB_COMPLETE_STATES:
time.sleep(_POLLING_INTERVAL_IN_SECONDS)
try:
get_custom_job_response = job_client.get_custom_job(
name=custom_job_name
)
logging.info(
'GetCustomJob response state =%s', get_custom_job_response.state
)
retry_count = 0
# Handle transient connection error.
except ConnectionError as err:
retry_count += 1
if retry_count < _CONNECTION_ERROR_RETRY_LIMIT:
logging.warning(
'ConnectionError (%s) encountered when polling job: %s. Trying to '
'recreate the API client.', err, custom_job_name
)
# Recreate the Python API client.
job_client = aiplatform.gapic.JobServiceClient(
client_options=client_options
)
else:
logging.error(
'Request failed after %s retries.',
_CONNECTION_ERROR_RETRY_LIMIT
)
raise
if get_custom_job_response.state in _JOB_ERROR_STATES:
raise RuntimeError(
"Job failed with:\n%s" % get_custom_job_response.state
)
else:
logging.info(
'CustomJob %s completed with response state =%s', custom_job_name,
get_custom_job_response.state
)
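# Hypothetical usage sketch (not part of the original module): invoking the
# launcher function directly. Every value below is an illustrative placeholder;
# in the real pipeline these arguments come from the component's launcher
# entrypoint, and the job spec must match the Vertex AI CustomJob schema.
if __name__ == '__main__':
  example_payload = json.dumps({
      'display_name': 'example-custom-job',
      'job_spec': {
          'worker_pool_specs': [{
              'machine_spec': {'machine_type': 'n1-standard-4'},
              'replica_count': 1,
              'container_spec': {'image_uri': 'gcr.io/example-project/trainer:latest'},
          }],
      },
  })
  create_custom_job(
      type='CustomJob',
      project='example-project',
      location='us-central1',
      payload=example_payload,
      gcp_resources='/tmp/gcp_resources',
  )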
| 36.783582
| 90
| 0.683506
|
b9a93070899483801218efa0ac5e15f4f16de811
| 27,276
|
py
|
Python
|
lte/gateway/python/magma/pipelined/tests/test_uplink_bridge.py
|
hkshiv1/magma
|
1c91010f9726127b4702f9c85af2970fae7d5e2b
|
[
"BSD-3-Clause"
] | 1
|
2021-08-08T15:49:05.000Z
|
2021-08-08T15:49:05.000Z
|
lte/gateway/python/magma/pipelined/tests/test_uplink_bridge.py
|
hkshiv1/magma
|
1c91010f9726127b4702f9c85af2970fae7d5e2b
|
[
"BSD-3-Clause"
] | 143
|
2020-09-08T06:24:23.000Z
|
2022-03-29T05:56:53.000Z
|
lte/gateway/python/magma/pipelined/tests/test_uplink_bridge.py
|
hkshiv1/magma
|
1c91010f9726127b4702f9c85af2970fae7d5e2b
|
[
"BSD-3-Clause"
] | 1
|
2021-12-10T22:41:05.000Z
|
2021-12-10T22:41:05.000Z
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import subprocess
import unittest
import warnings
from concurrent.futures import Future
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.tests.app.start_pipelined import (
PipelinedController,
TestSetup,
)
from magma.pipelined.tests.pipelined_test_util import (
assert_bridge_snapshot_match,
create_service_manager,
get_iface_gw_ipv4,
get_iface_ipv4,
get_ovsdb_port_tag,
start_ryu_app_thread,
stop_ryu_app_thread,
)
from ryu.lib import hub
class UplinkBridgeTest(unittest.TestCase):
BRIDGE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
UPLINK_BRIDGE = 'upt_br0'
@classmethod
def setUpClass(cls):
"""
        Starts the thread which launches the ryu apps.
        Creates a testing bridge, adds a port, and sets up the port
        interfaces, then launches the ryu apps for testing pipelined.
        References to the launched apps are obtained via futures.
"""
super(UplinkBridgeTest, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([])
uplink_bridge_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.UplinkBridge,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.UplinkBridge:
uplink_bridge_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'ovs_gtp_port_number': 32768,
'clean_restart': True,
'enable_nat': True,
},
mconfig=None,
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.BRIDGE)
BridgeTools.create_bridge(cls.UPLINK_BRIDGE, cls.UPLINK_BRIDGE)
cls.thread = start_ryu_app_thread(test_setup)
cls.uplink_br_controller = uplink_bridge_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
BridgeTools.destroy_bridge(cls.UPLINK_BRIDGE)
def testFlowSnapshotMatch(self):
assert_bridge_snapshot_match(self, self.UPLINK_BRIDGE, self.service_manager)
class UplinkBridgeWithNonNATTest(unittest.TestCase):
BRIDGE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
UPLINK_BRIDGE = 'upt_br0'
UPLINK_DHCP = 'test_dhcp0'
UPLINK_PATCH = 'test_patch_p2'
UPLINK_ETH_PORT = 'test_eth3'
VLAN_DEV_IN = "test_v_in"
VLAN_DEV_OUT = "test_v_out"
@classmethod
def setUpClass(cls):
"""
        Starts the thread which launches the ryu apps.
        Creates a testing bridge, adds a port, and sets up the port
        interfaces, then launches the ryu apps for testing pipelined.
        References to the launched apps are obtained via futures.
"""
super(UplinkBridgeWithNonNATTest, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([])
uplink_bridge_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.UplinkBridge,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.UplinkBridge:
uplink_bridge_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'ovs_gtp_port_number': 32768,
'clean_restart': True,
'enable_nat': False,
'uplink_bridge': cls.UPLINK_BRIDGE,
'uplink_eth_port_name': cls.UPLINK_ETH_PORT,
'virtual_mac': '02:bb:5e:36:06:4b',
'uplink_patch': cls.UPLINK_PATCH,
'uplink_dhcp_port': cls.UPLINK_DHCP,
'sgi_management_iface_vlan': "",
'dev_vlan_in': cls.VLAN_DEV_IN,
'dev_vlan_out': cls.VLAN_DEV_OUT,
'ovs_vlan_workaround': False,
'sgi_management_iface_ip_addr': '1.1.11.1',
},
mconfig=None,
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.BRIDGE)
BridgeTools.create_bridge(cls.UPLINK_BRIDGE, cls.UPLINK_BRIDGE)
BridgeTools.create_veth_pair(cls.VLAN_DEV_IN,
cls.VLAN_DEV_OUT)
# Add to OVS,
BridgeTools.add_ovs_port(cls.UPLINK_BRIDGE,
cls.VLAN_DEV_IN, "70")
BridgeTools.add_ovs_port(cls.UPLINK_BRIDGE,
cls.VLAN_DEV_OUT, "71")
# dummy uplink interface
vlan = "10"
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_DHCP, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_PATCH, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_ETH_PORT, None)
cls.thread = start_ryu_app_thread(test_setup)
cls.uplink_br_controller = uplink_bridge_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
BridgeTools.destroy_bridge(cls.UPLINK_BRIDGE)
def testFlowSnapshotMatch(self):
cls = self.__class__
assert_bridge_snapshot_match(self, self.UPLINK_BRIDGE, self.service_manager,
include_stats=False)
class UplinkBridgeWithNonNATTestVlan(unittest.TestCase):
BRIDGE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
UPLINK_BRIDGE = 'upt_br0'
UPLINK_DHCP = 'test_dhcp0'
UPLINK_PATCH = 'test_patch_p2'
UPLINK_ETH_PORT = 'test_eth3'
VLAN_TAG='100'
VLAN_DEV_IN = "test_v_in"
VLAN_DEV_OUT = "test_v_out"
@classmethod
def setUpClass(cls):
"""
        Starts the thread which launches the ryu apps.
        Creates a testing bridge, adds a port, and sets up the port
        interfaces, then launches the ryu apps for testing pipelined.
        References to the launched apps are obtained via futures.
"""
super(UplinkBridgeWithNonNATTestVlan, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([])
uplink_bridge_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.UplinkBridge,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.UplinkBridge:
uplink_bridge_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'ovs_gtp_port_number': 32768,
'clean_restart': True,
'enable_nat': False,
'uplink_bridge': cls.UPLINK_BRIDGE,
'uplink_eth_port_name': cls.UPLINK_ETH_PORT,
'virtual_mac': '02:bb:5e:36:06:4b',
'uplink_patch': cls.UPLINK_PATCH,
'uplink_dhcp_port': cls.UPLINK_DHCP,
'sgi_management_iface_vlan': cls.VLAN_TAG,
'dev_vlan_in': cls.VLAN_DEV_IN,
'dev_vlan_out': cls.VLAN_DEV_OUT,
'sgi_management_iface_ip_addr': '1.1.11.1',
},
mconfig=None,
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.BRIDGE)
BridgeTools.create_bridge(cls.UPLINK_BRIDGE, cls.UPLINK_BRIDGE)
BridgeTools.create_veth_pair(cls.VLAN_DEV_IN,
cls.VLAN_DEV_OUT)
# Add to OVS,
BridgeTools.add_ovs_port(cls.UPLINK_BRIDGE,
cls.VLAN_DEV_IN, "70")
BridgeTools.add_ovs_port(cls.UPLINK_BRIDGE,
cls.VLAN_DEV_OUT, "71")
# validate vlan id set
vlan = "10"
BridgeTools.create_bridge(cls.UPLINK_BRIDGE, cls.UPLINK_BRIDGE)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_DHCP, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_PATCH, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_ETH_PORT, None)
cls.thread = start_ryu_app_thread(test_setup)
cls.uplink_br_controller = uplink_bridge_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
BridgeTools.destroy_bridge(cls.UPLINK_BRIDGE)
def testFlowSnapshotMatch(self):
cls = self.__class__
assert_bridge_snapshot_match(self, self.UPLINK_BRIDGE, self.service_manager,
include_stats=False)
@unittest.skip("this resets the default GW")
class UplinkBridgeWithNonNATTest_IP_VLAN(unittest.TestCase):
BRIDGE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
UPLINK_BRIDGE = 'upt_br0'
UPLINK_DHCP = 'test_dhcp0'
UPLINK_PATCH = 'test_patch_p2'
UPLINK_ETH_PORT = 'test_eth3'
VLAN_TAG='500'
SGi_IP="1.6.5.7"
@classmethod
def setUpClass(cls):
"""
        Starts the thread which launches the ryu apps.
        Creates a testing bridge, adds a port, and sets up the port
        interfaces, then launches the ryu apps for testing pipelined.
        References to the launched apps are obtained via futures.
"""
super(UplinkBridgeWithNonNATTest_IP_VLAN, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([])
uplink_bridge_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.UplinkBridge,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.UplinkBridge:
uplink_bridge_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'ovs_gtp_port_number': 32768,
'clean_restart': True,
'enable_nat': False,
'uplink_bridge': cls.UPLINK_BRIDGE,
'uplink_eth_port_name': cls.UPLINK_ETH_PORT,
'virtual_mac': '02:bb:5e:36:06:4b',
'uplink_patch': cls.UPLINK_PATCH,
'uplink_dhcp_port': cls.UPLINK_DHCP,
'sgi_management_iface_vlan': cls.VLAN_TAG,
'sgi_management_iface_ip_addr': cls.SGi_IP,
'dev_vlan_in': "test_v_in",
'dev_vlan_out': "test_v_out",
},
mconfig=None,
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.BRIDGE)
# validate vlan id set
vlan = "10"
BridgeTools.create_bridge(cls.UPLINK_BRIDGE, cls.UPLINK_BRIDGE)
set_ip_cmd = ["ip",
"addr", "replace",
"2.33.44.6",
"dev",
cls.UPLINK_BRIDGE]
subprocess.check_call(set_ip_cmd)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_DHCP, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_PATCH, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_ETH_PORT, None)
cls.thread = start_ryu_app_thread(test_setup)
cls.uplink_br_controller = uplink_bridge_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
BridgeTools.destroy_bridge(cls.UPLINK_BRIDGE)
def testFlowSnapshotMatch(self):
cls = self.__class__
assert_bridge_snapshot_match(self, self.UPLINK_BRIDGE, self.service_manager,
include_stats=False)
self.assertIn(cls.SGi_IP, get_iface_ipv4(cls.UPLINK_BRIDGE), "ip not found")
@unittest.skip("this resets the default GW")
class UplinkBridgeWithNonNATTest_IP_VLAN_GW(unittest.TestCase):
BRIDGE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
UPLINK_BRIDGE = 'upt_br0'
UPLINK_DHCP = 'test_dhcp0'
UPLINK_PATCH = 'test_patch_p2'
UPLINK_ETH_PORT = 'test_eth3'
VLAN_TAG = '100'
SGi_IP = "1.6.5.7/24"
SGi_GW = "1.6.5.1"
@classmethod
def setUpClass(cls):
"""
        Starts the thread which launches the ryu apps.
        Creates a testing bridge, adds a port, and sets up the port
        interfaces, then launches the ryu apps for testing pipelined.
        References to the launched apps are obtained via futures.
"""
super(UplinkBridgeWithNonNATTest_IP_VLAN_GW, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([])
uplink_bridge_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.UplinkBridge,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.UplinkBridge:
uplink_bridge_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'ovs_gtp_port_number': 32768,
'clean_restart': True,
'enable_nat': False,
'uplink_bridge': cls.UPLINK_BRIDGE,
'uplink_eth_port_name': cls.UPLINK_ETH_PORT,
'virtual_mac': '02:bb:5e:36:06:4b',
'uplink_patch': cls.UPLINK_PATCH,
'uplink_dhcp_port': cls.UPLINK_DHCP,
'sgi_management_iface_vlan': cls.VLAN_TAG,
'sgi_management_iface_ip_addr': cls.SGi_IP,
'sgi_management_iface_gw': cls.SGi_GW,
'dev_vlan_in': "test_v_in",
'dev_vlan_out': "test_v_out",
},
mconfig=None,
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.BRIDGE)
# validate vlan id set
vlan = "10"
BridgeTools.create_bridge(cls.UPLINK_BRIDGE, cls.UPLINK_BRIDGE)
subprocess.Popen(["ovs-vsctl", "set", "port", cls.UPLINK_BRIDGE,
"tag=" + vlan]).wait()
assert get_ovsdb_port_tag(cls.UPLINK_BRIDGE) == vlan
set_ip_cmd = ["ip",
"addr", "replace",
"2.33.44.6",
"dev",
cls.UPLINK_BRIDGE]
subprocess.check_call(set_ip_cmd)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_DHCP, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_PATCH, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_ETH_PORT, None)
cls.thread = start_ryu_app_thread(test_setup)
cls.uplink_br_controller = uplink_bridge_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
BridgeTools.destroy_bridge(cls.UPLINK_BRIDGE)
def testFlowSnapshotMatch(self):
cls = self.__class__
assert_bridge_snapshot_match(self, self.UPLINK_BRIDGE,
self.service_manager,
include_stats=False)
self.assertIn(cls.SGi_GW, get_iface_gw_ipv4(cls.UPLINK_BRIDGE),
"gw not found")
class UplinkBridgeWithNonNatUplinkConnect_Test(unittest.TestCase):
BRIDGE = 'testing_br'
IFACE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
SCRIPT_PATH = "/home/vagrant/magma/lte/gateway/python/magma/mobilityd/"
NET_SW_BR = "net_sw_up1"
UPLINK_DHCP = "tino_dhcp"
UPLINK_ETH_PORT = "upb_ul_0"
UPLINK_BRIDGE = 'upt_br0'
UPLINK_PATCH = 'test_patch_p2'
ROUTER_IP = "10.55.0.211"
@classmethod
def _setup_vlan_network(cls, vlan: str):
setup_vlan_switch = cls.SCRIPT_PATH + "scripts/setup-uplink-vlan-sw.sh"
subprocess.check_call([setup_vlan_switch, cls.NET_SW_BR, "upb"])
cls._setup_vlan(vlan)
@classmethod
def _setup_vlan(cls, vlan):
setup_vlan_switch = cls.SCRIPT_PATH + "scripts/setup-uplink-vlan-srv.sh"
subprocess.check_call([setup_vlan_switch, cls.NET_SW_BR, vlan, "55"])
@classmethod
def setUpClass(cls):
"""
        Starts the thread which launches the ryu apps.
        Creates a testing bridge, adds a port, and sets up the port
        interfaces, then launches the ryu apps for testing pipelined.
        References to the launched apps are obtained via futures.
"""
super(UplinkBridgeWithNonNatUplinkConnect_Test, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([])
cls._setup_vlan_network("0")
BridgeTools.create_bridge(cls.UPLINK_BRIDGE, cls.UPLINK_BRIDGE)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_DHCP, None)
BridgeTools.create_internal_iface(cls.UPLINK_BRIDGE,
cls.UPLINK_PATCH, None)
check_connectivity(cls.ROUTER_IP, cls.UPLINK_ETH_PORT)
        # This is set up after the AGW boots up in NATed mode.
uplink_bridge_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.UplinkBridge,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.UplinkBridge:
uplink_bridge_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'ovs_gtp_port_number': 32768,
'clean_restart': True,
'enable_nat': False,
'uplink_bridge': cls.UPLINK_BRIDGE,
'uplink_eth_port_name': cls.UPLINK_ETH_PORT,
'virtual_mac': '02:bb:5e:36:06:4b',
'uplink_patch': cls.UPLINK_PATCH,
'uplink_dhcp_port': cls.UPLINK_DHCP,
'sgi_management_iface_vlan': "",
'ovs_vlan_workaround': True,
'dev_vlan_in': "testv1_in",
'dev_vlan_out': "testv1_out",
},
mconfig=None,
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.BRIDGE)
cls.thread = start_ryu_app_thread(test_setup)
cls.uplink_br_controller = uplink_bridge_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
BridgeTools.destroy_bridge(cls.UPLINK_BRIDGE)
BridgeTools.destroy_bridge(cls.NET_SW_BR)
    # TODO: this test updates resolv.conf; once that is fixed, turn the test back on
    @unittest.skip("updates resolv.conf")
def testFlowSnapshotMatch(self):
cls = self.__class__
assert_bridge_snapshot_match(self, self.UPLINK_BRIDGE, self.service_manager,
include_stats=False)
self.assertEqual(get_ovsdb_port_tag(cls.UPLINK_BRIDGE), '[]')
        # After non-NAT init, the router should be accessible.
        # Manually start the DHCP client on up-br.
check_connectivity(cls.ROUTER_IP, cls.UPLINK_BRIDGE)
class UplinkBridgeTestNatIPAddr(unittest.TestCase):
BRIDGE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
BRIDGE_ETH_PORT = "eth_t1"
UPLINK_BRIDGE = 'upt_br0'
SGi_IP="1.6.5.77"
@classmethod
def setUpClass(cls):
"""
        Starts the thread which launches the ryu apps.
        Creates a testing bridge, adds a port, and sets up the port
        interfaces, then launches the ryu apps for testing pipelined.
        References to the launched apps are obtained via futures.
"""
super(UplinkBridgeTestNatIPAddr, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([])
uplink_bridge_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.UplinkBridge,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.UplinkBridge:
uplink_bridge_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'ovs_gtp_port_number': 32768,
'clean_restart': True,
'enable_nat': True,
'uplink_bridge': cls.UPLINK_BRIDGE,
'sgi_management_iface_ip_addr': cls.SGi_IP,
'uplink_eth_port_name': cls.BRIDGE_ETH_PORT,
},
mconfig=None,
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.BRIDGE)
BridgeTools.create_bridge(cls.UPLINK_BRIDGE, cls.UPLINK_BRIDGE)
BridgeTools.create_internal_iface(cls.BRIDGE,
cls.BRIDGE_ETH_PORT, '2.2.2.2')
cls.thread = start_ryu_app_thread(test_setup)
cls.uplink_br_controller = uplink_bridge_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
BridgeTools.destroy_bridge(cls.UPLINK_BRIDGE)
def testFlowSnapshotMatch(self):
cls = self.__class__
assert_bridge_snapshot_match(self, self.UPLINK_BRIDGE, self.service_manager)
self.assertIn(cls.SGi_IP, get_iface_ipv4(cls.BRIDGE_ETH_PORT), "ip not found")
def check_connectivity(dst: str, dev_name: str):
try:
ifdown_if = ["dhclient", dev_name]
subprocess.check_call(ifdown_if)
except subprocess.SubprocessError as e:
logging.warning("Error while setting dhcl IP: %s: %s",
dev_name, e)
return
hub.sleep(1)
try:
ping_cmd = ["ping", "-c", "3", dst]
subprocess.check_call(ping_cmd)
except subprocess.SubprocessError as e:
logging.warning("Error while ping: %s", e)
        # For now, don't assert here.
validate_routing_table(dst, dev_name)
def validate_routing_table(dst: str, dev_name: str) -> None:
    dump1 = subprocess.Popen(["ip", "r", "get", dst],
                             stdout=subprocess.PIPE)
    for line in dump1.stdout.readlines():
        if "dev" not in str(line):
            continue
        if dev_name in str(line):
            return
    logging.error("could not find route to %s via %s", dst, dev_name)
    logging.error("dump1: %s", str(dump1))
    assert False, "no route to %s via %s" % (dst, dev_name)
if __name__ == "__main__":
    unittest.main()
| 37.262295
| 86
| 0.60962
|
80f1141b757779cf6c0a7d62673920cc67f5aa66
| 2,354
|
py
|
Python
|
src/evaluation/MobilenetEvaluation.py
|
DennisMcWherter/SingleImageDataAugmentation
|
a5fb760ce852adcd89498fa8f8b5be1deaf03d26
|
[
"MIT"
] | null | null | null |
src/evaluation/MobilenetEvaluation.py
|
DennisMcWherter/SingleImageDataAugmentation
|
a5fb760ce852adcd89498fa8f8b5be1deaf03d26
|
[
"MIT"
] | null | null | null |
src/evaluation/MobilenetEvaluation.py
|
DennisMcWherter/SingleImageDataAugmentation
|
a5fb760ce852adcd89498fa8f8b5be1deaf03d26
|
[
"MIT"
] | null | null | null |
import logging
import os
import time
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from ..interfaces import EvaluationStrategy
from ..models.MobilenetV2 import TestMobilenetV2
from ..torch_utils import convert_samples_to_dataset, test_model
logger = logging.getLogger(__name__)
class MobilenetV2EvaluationStrategy(EvaluationStrategy):
def __init__(self, output_path, num_classes):
""" MobilenetV2 evaluation strategy.
Parameters:
output_path (str): Location where output results file is written
num_classes (int): Number of classes for Mobilenet to classify
"""
self.output_path = output_path
self.num_classes = num_classes
self.loss_fn = nn.CrossEntropyLoss()
def evaluate(self, holdout_set, model_path):
logger.info('Evaluating model at path: {}'.format(model_path))
model = self.__load_model(model_path)
dataset = self.__to_dataset(holdout_set)
start = time.time()
test_results = test_model(model, self.loss_fn, dataset)
end = time.time()
total_time = end - start
logger.info('Done evaluating (took {} seconds)'.format(total_time))
path = os.path.join(self.output_path, 'results.txt')
results = (*test_results, total_time)
result_str = 'Holdout Loss: {}\nHoldout Accuracy: {}\nEvaluation Time: {}\n'.format(*results)
logger.info("Network Results\n----------------\n{}".format(result_str))
self.__write_results(path, result_str)
return path
def __load_model(self, model_path):
model = TestMobilenetV2(num_classes=self.num_classes)
model.load_state_dict(torch.load(model_path))
if torch.cuda.is_available():
model = model.cuda()
model.eval()
return model
def __to_dataset(self, holdout_set):
dataset = convert_samples_to_dataset(holdout_set, transform=transforms.ToTensor())
return DataLoader(dataset, batch_size=25, shuffle=False)
def __write_results(self, path, result_str):
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
with open(path, 'w') as f:
f.write(result_str)
logger.info("Wrote results output to: {}".format(path))
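# Hypothetical usage sketch (not part of the original module). The paths below
# are placeholders, and `holdout_set` must be whatever sample collection the
# surrounding pipeline feeds into convert_samples_to_dataset().
if __name__ == "__main__":
    strategy = MobilenetV2EvaluationStrategy(output_path="results/", num_classes=10)
    # results_path = strategy.evaluate(holdout_set, "mobilenet_v2.pt")
    # evaluate() writes results.txt under output_path and returns its path.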
| 33.15493
| 101
| 0.67672
|
af18c33a642d6877f9a9d9b94ce72e475e6a6263
| 122
|
py
|
Python
|
pluspacket/__init__.py
|
mami-project/PyPLUSPacket
|
6d51fa646801a58ca31adf6c13c6dab530e901fb
|
[
"BSD-2-Clause"
] | null | null | null |
pluspacket/__init__.py
|
mami-project/PyPLUSPacket
|
6d51fa646801a58ca31adf6c13c6dab530e901fb
|
[
"BSD-2-Clause"
] | null | null | null |
pluspacket/__init__.py
|
mami-project/PyPLUSPacket
|
6d51fa646801a58ca31adf6c13c6dab530e901fb
|
[
"BSD-2-Clause"
] | null | null | null |
from pluspacket.packet import *
if __name__ == "__main__":
import tests
import unittest
unittest.main(module='tests')
| 17.428571
| 31
| 0.754098
|
b8d7132dbd7dcf2c4fab11d02573dc5b48b62c38
| 1,878
|
py
|
Python
|
dpm/distributions/kumaraswamy.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | 1
|
2021-07-20T14:02:55.000Z
|
2021-07-20T14:02:55.000Z
|
dpm/distributions/kumaraswamy.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | null | null | null |
dpm/distributions/kumaraswamy.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | null | null | null |
import torch
from torch.nn import Parameter
import dpm.utils as utils
from torch.nn.functional import softplus
from .distribution import Distribution
from .uniform import Uniform
from .transform_distribution import TransformDistribution
from dpm.transforms import Kumaraswamy as kumaraswamy_tform
class Kumaraswamy(Distribution):
def __init__(self, alpha=1., beta=1., learnable=True):
super().__init__()
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha).view(1, -1)
if not isinstance(beta, torch.Tensor):
beta = torch.tensor(beta).view(1, -1)
self._alpha = utils.softplus_inverse(alpha.float())
self._beta = utils.softplus_inverse(beta.float())
self.n_dims = len(alpha)
if learnable:
self._alpha = Parameter(self._alpha)
self._beta = Parameter(self._beta)
def create_dist(self):
zero = torch.zeros_like(self._alpha)
one = torch.ones_like(self._alpha)
model = TransformDistribution(Uniform(zero, one, learnable=False),
[kumaraswamy_tform(self.alpha, self.beta, learnable=False)])
return model
def log_prob(self, value):
model = self.create_dist()
return model.log_prob(value)
def sample(self, batch_size):
model = self.create_dist()
return model.sample(batch_size)
def cdf(self, value):
model = self.create_dist()
return model.cdf(value)
def icdf(self, value):
model = self.create_dist()
return model.icdf(value)
@property
def alpha(self):
return softplus(self._alpha)
@property
def beta(self):
return softplus(self._beta)
def get_parameters(self):
return {'alpha':self.alpha.detach().numpy(),
'beta':self.beta.detach().numpy()}
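# Hypothetical usage sketch (not part of the original module); the parameter
# values and batch size below are illustrative assumptions only.
if __name__ == "__main__":
    dist = Kumaraswamy(alpha=2.0, beta=5.0, learnable=False)
    samples = dist.sample(batch_size=4)    # draw 4 samples in (0, 1)
    log_probs = dist.log_prob(samples)     # log-density at those samples
    print(samples, log_probs, dist.get_parameters())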
| 31.830508
| 98
| 0.642705
|
03a87eccf09e8ef6696d8b620d69bd79b89b68d4
| 25,176
|
py
|
Python
|
tests/plugins/test_sql.py
|
kant/frictionless-py
|
09cc98e1966d6f97f4eecb47757f45f8a946c5e7
|
[
"MIT"
] | null | null | null |
tests/plugins/test_sql.py
|
kant/frictionless-py
|
09cc98e1966d6f97f4eecb47757f45f8a946c5e7
|
[
"MIT"
] | null | null | null |
tests/plugins/test_sql.py
|
kant/frictionless-py
|
09cc98e1966d6f97f4eecb47757f45f8a946c5e7
|
[
"MIT"
] | null | null | null |
import os
import pytest
import datetime
import sqlalchemy as sa
from frictionless import Table, Package, Resource, exceptions
from frictionless.plugins.sql import SqlDialect, SqlStorage
from dotenv import load_dotenv
load_dotenv(".env")
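# NOTE (illustrative assumption): the tests below rely on a `database_url`
# pytest fixture that is normally supplied by the project's conftest.py and is
# not shown in this file. The fixture sketched here is named differently so it
# does not shadow the real one; it builds the SQLite `data` table the parser
# tests expect.
@pytest.fixture
def example_database_url(tmpdir):
    path = str(tmpdir.join("database.db"))
    url = "sqlite:///%s" % path
    engine = sa.create_engine(url)
    engine.execute("CREATE TABLE data (id INTEGER PRIMARY KEY, name TEXT)")
    engine.execute("INSERT INTO data VALUES (1, 'english'), (2, '中国人')")
    return url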
# Parser
def test_table_sql(database_url):
dialect = SqlDialect(table="data")
with Table(database_url, dialect=dialect) as table:
assert table.schema == {
"fields": [
{"constraints": {"required": True}, "name": "id", "type": "integer"},
{"name": "name", "type": "string"},
],
"primaryKey": ["id"],
}
assert table.header == ["id", "name"]
assert table.read_data() == [[1, "english"], [2, "中国人"]]
def test_table_sql_order_by(database_url):
dialect = SqlDialect(table="data", order_by="id")
with Table(database_url, dialect=dialect) as table:
assert table.header == ["id", "name"]
assert table.read_data() == [[1, "english"], [2, "中国人"]]
def test_table_sql_order_by_desc(database_url):
dialect = SqlDialect(table="data", order_by="id desc")
with Table(database_url, dialect=dialect) as table:
assert table.header == ["id", "name"]
assert table.read_data() == [[2, "中国人"], [1, "english"]]
def test_table_sql_table_is_required_error(database_url):
table = Table(database_url)
with pytest.raises(exceptions.FrictionlessException) as excinfo:
table.open()
error = excinfo.value.error
assert error.code == "dialect-error"
assert error.note.count("'table' is a required property")
def test_table_sql_headers_false(database_url):
dialect = SqlDialect(table="data")
with Table(database_url, dialect=dialect, headers=False) as table:
assert table.header == []
assert table.read_data() == [["id", "name"], [1, "english"], [2, "中国人"]]
def test_table_sql_write(database_url):
source = "data/table.csv"
dialect = SqlDialect(table="name", order_by="id")
with Table(source) as table:
table.write(database_url, dialect=dialect)
with Table(database_url, dialect=dialect) as table:
assert table.header == ["id", "name"]
assert table.read_data() == [[1, "english"], [2, "中国人"]]
# Storage
def test_storage_types(database_url):
engine = sa.create_engine(database_url)
prefix = "prefix_"
# Export/Import
source = Package("data/storage/types.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("types").schema == {
"fields": [
{"name": "any", "type": "string"}, # type fallback
{"name": "array", "type": "string"}, # type fallback
{"name": "boolean", "type": "boolean"},
{"name": "date", "type": "date"},
{"name": "date_year", "type": "date"}, # format removal
{"name": "datetime", "type": "datetime"},
{"name": "duration", "type": "string"}, # type fallback
{"name": "geojson", "type": "string"}, # type fallback
{"name": "geopoint", "type": "string"}, # type fallback
{"name": "integer", "type": "integer"},
{"name": "number", "type": "number"},
{"name": "object", "type": "string"}, # type fallback
{"name": "string", "type": "string"},
{"name": "time", "type": "time"},
{"name": "year", "type": "integer"}, # type downgrade
{"name": "yearmonth", "type": "string"}, # type fallback
],
}
# Assert data
assert target.get_resource("types").read_rows() == [
{
"any": "中国人",
"array": '["Mike", "John"]',
"boolean": True,
"date": datetime.date(2015, 1, 1),
"date_year": datetime.date(2015, 1, 1),
"datetime": datetime.datetime(2015, 1, 1, 3, 0),
"duration": "P1Y1M",
"geojson": '{"type": "Point", "coordinates": [33, 33.33]}',
"geopoint": "30,70",
"integer": 1,
"number": 7,
"object": '{"chars": 560}',
"string": "english",
"time": datetime.time(3, 0),
"year": 2015,
"yearmonth": "2015-01",
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
def test_storage_integrity(database_url):
engine = sa.create_engine(database_url)
prefix = "prefix_"
# Export/Import
source = Package("data/storage/integrity.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata (main)
assert target.get_resource("integrity_main").schema == {
"fields": [
# added required
{"name": "id", "type": "integer", "constraints": {"required": True}},
{"name": "parent", "type": "integer"},
{"name": "description", "type": "string"},
],
"primaryKey": ["id"],
"foreignKeys": [
{"fields": ["parent"], "reference": {"resource": "", "fields": ["id"]}}
],
}
# Assert metadata (link)
assert target.get_resource("integrity_link").schema == {
"fields": [
# added required
{"name": "main_id", "type": "integer", "constraints": {"required": True}},
# added required; removed unique
{"name": "some_id", "type": "integer", "constraints": {"required": True}},
# removed unique
{"name": "description", "type": "string"},
],
"primaryKey": ["main_id", "some_id"],
"foreignKeys": [
{
"fields": ["main_id"],
"reference": {"resource": "integrity_main", "fields": ["id"]},
}
],
}
# Assert data (main)
assert target.get_resource("integrity_main").read_rows() == [
{"id": 1, "parent": None, "description": "english"},
{"id": 2, "parent": 1, "description": "中国人"},
]
# Assert data (link)
assert target.get_resource("integrity_link").read_rows() == [
{"main_id": 1, "some_id": 1, "description": "note1"},
{"main_id": 2, "some_id": 2, "description": "note2"},
]
# Cleanup storage
storage.delete_package(target.resource_names)
def test_storage_constraints(database_url):
engine = sa.create_engine(database_url)
prefix = "prefix_"
# Export/Import
source = Package("data/storage/constraints.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("constraints").schema == {
"fields": [
{"name": "required", "type": "string", "constraints": {"required": True}},
{"name": "minLength", "type": "string"}, # constraint removal
{"name": "maxLength", "type": "string"}, # constraint removal
{"name": "pattern", "type": "string"}, # constraint removal
{"name": "enum", "type": "string"}, # constraint removal
{"name": "minimum", "type": "integer"}, # constraint removal
{"name": "maximum", "type": "integer"}, # constraint removal
],
}
# Assert data
assert target.get_resource("constraints").read_rows() == [
{
"required": "passing",
"minLength": "passing",
"maxLength": "passing",
"pattern": "passing",
"enum": "passing",
"minimum": 5,
"maximum": 5,
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.parametrize(
"field_name, cell",
[
("required", ""),
("minLength", "bad"),
("maxLength", "badbadbad"),
("pattern", "bad"),
("enum", "bad"),
("minimum", 3),
("maximum", 9),
],
)
def test_storage_constraints_not_valid_error(database_url, field_name, cell):
engine = sa.create_engine(database_url)
package = Package("data/storage/constraints.json")
resource = package.get_resource("constraints")
# We set an invalid cell to the data property
for index, field in enumerate(resource.schema.fields):
if field.name == field_name:
resource.data[1][index] = cell
# NOTE: should we wrap these exceptions?
with pytest.raises(sa.exc.IntegrityError):
resource.to_sql(engine=engine, force=True)
def test_storage_read_resource_not_existent_error(database_url):
engine = sa.create_engine(database_url)
storage = SqlStorage(engine=engine)
with pytest.raises(exceptions.FrictionlessException) as excinfo:
storage.read_resource("bad")
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("does not exist")
def test_storage_write_resource_existent_error(database_url):
engine = sa.create_engine(database_url)
resource = Resource(path="data/table.csv")
storage = resource.to_sql(engine=engine)
with pytest.raises(exceptions.FrictionlessException) as excinfo:
storage.write_resource(resource)
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("already exists")
# Cleanup storage
storage.delete_package(list(storage))
def test_storage_delete_resource_not_existent_error(database_url):
engine = sa.create_engine(database_url)
storage = SqlStorage(engine=engine)
with pytest.raises(exceptions.FrictionlessException) as excinfo:
storage.delete_resource("bad")
error = excinfo.value.error
assert error.code == "storage-error"
assert error.note.count("does not exist")
def test_storage_views_support(database_url):
engine = sa.create_engine(database_url)
engine.execute("CREATE VIEW data_view AS SELECT * FROM data")
storage = SqlStorage(engine=engine)
resource = storage.read_resource("data_view")
assert resource.schema == {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
}
assert resource.read_rows() == [
{"id": 1, "name": "english"},
{"id": 2, "name": "中国人"},
]
# Storage (PostgreSQL)
@pytest.mark.ci
def test_postgresql_storage_types():
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/types.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("types").schema == {
"fields": [
{"name": "any", "type": "string"}, # type fallback
{"name": "array", "type": "object"}, # type downgrade
{"name": "boolean", "type": "boolean"},
{"name": "date", "type": "date"},
{"name": "date_year", "type": "date"}, # format removal
{"name": "datetime", "type": "datetime"},
{"name": "duration", "type": "string"}, # type fallback
{"name": "geojson", "type": "object"}, # type downgrade
{"name": "geopoint", "type": "string"}, # type fallback
{"name": "integer", "type": "integer"},
{"name": "number", "type": "number"},
{"name": "object", "type": "object"},
{"name": "string", "type": "string"},
{"name": "time", "type": "time"},
{"name": "year", "type": "integer"}, # type downgrade
{"name": "yearmonth", "type": "string"}, # type fallback
],
}
# Assert data
assert target.get_resource("types").read_rows() == [
{
"any": "中国人",
"array": None, # TODO: fix array
"boolean": True,
"date": datetime.date(2015, 1, 1),
"date_year": datetime.date(2015, 1, 1),
"datetime": datetime.datetime(2015, 1, 1, 3, 0),
"duration": "P1Y1M",
"geojson": {"type": "Point", "coordinates": [33, 33.33]},
"geopoint": "30,70",
"integer": 1,
"number": 7,
"object": {"chars": 560},
"string": "english",
"time": datetime.time(3, 0),
"year": 2015,
"yearmonth": "2015-01",
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.ci
def test_postgresql_storage_integrity():
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/integrity.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata (main)
assert target.get_resource("integrity_main").schema == {
"fields": [
# added required
{"name": "id", "type": "integer", "constraints": {"required": True}},
{"name": "parent", "type": "integer"},
{"name": "description", "type": "string"},
],
"primaryKey": ["id"],
"foreignKeys": [
{"fields": ["parent"], "reference": {"resource": "", "fields": ["id"]}}
],
}
# Assert metadata (link)
assert target.get_resource("integrity_link").schema == {
"fields": [
# added required
{"name": "main_id", "type": "integer", "constraints": {"required": True}},
# added required; removed unique
{"name": "some_id", "type": "integer", "constraints": {"required": True}},
# removed unique
{"name": "description", "type": "string"},
],
"primaryKey": ["main_id", "some_id"],
"foreignKeys": [
{
"fields": ["main_id"],
"reference": {"resource": "integrity_main", "fields": ["id"]},
}
],
}
# Assert data (main)
assert target.get_resource("integrity_main").read_rows() == [
{"id": 1, "parent": None, "description": "english"},
{"id": 2, "parent": 1, "description": "中国人"},
]
# Assert data (link)
assert target.get_resource("integrity_link").read_rows() == [
{"main_id": 1, "some_id": 1, "description": "note1"},
{"main_id": 2, "some_id": 2, "description": "note2"},
]
# Cleanup storage
storage.delete_package(target.resource_names)
# TODO: recover enum support
@pytest.mark.ci
@pytest.mark.skip
def test_postgresql_storage_constraints(database_url):
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/constraints.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("constraints").schema == {
"fields": [
{"name": "required", "type": "string", "constraints": {"required": True}},
{"name": "minLength", "type": "string"}, # constraint removal
{"name": "maxLength", "type": "string"}, # constraint removal
{"name": "pattern", "type": "string"}, # constraint removal
{"name": "enum", "type": "string"}, # constraint removal
{"name": "minimum", "type": "integer"}, # constraint removal
{"name": "maximum", "type": "integer"}, # constraint removal
],
}
# Assert data
assert target.get_resource("constraints").read_rows() == [
{
"required": "passing",
"minLength": "passing",
"maxLength": "passing",
"pattern": "passing",
"enum": "passing",
"minimum": 5,
"maximum": 5,
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
@pytest.mark.ci
@pytest.mark.parametrize(
"field_name, cell",
[
("required", ""),
("minLength", "bad"),
("maxLength", "badbadbad"),
("pattern", "bad"),
("enum", "bad"),
("minimum", 3),
("maximum", 9),
],
)
def test_postgresql_storage_constraints_not_valid_error(database_url, field_name, cell):
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
package = Package("data/storage/constraints.json")
resource = package.get_resource("constraints")
# We set an invalid cell to the data property
for index, field in enumerate(resource.schema.fields):
if field.name == field_name:
resource.data[1][index] = cell
with pytest.raises((sa.exc.IntegrityError, sa.exc.DataError)):
resource.to_sql(engine=engine, force=True)
@pytest.mark.ci
def test_postgresql_storage_views_support():
engine = sa.create_engine(os.environ["POSTGRESQL_URL"])
engine.execute("DROP VIEW IF EXISTS data_view")
engine.execute("DROP TABLE IF EXISTS data")
engine.execute("CREATE TABLE data (id INTEGER PRIMARY KEY, name TEXT)")
engine.execute("INSERT INTO data VALUES (1, 'english'), (2, '中国人')")
engine.execute("CREATE VIEW data_view AS SELECT * FROM data")
storage = SqlStorage(engine=engine)
resource = storage.read_resource("data_view")
assert resource.schema == {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
}
assert resource.read_rows() == [
{"id": 1, "name": "english"},
{"id": 2, "name": "中国人"},
]
# Storage (MySQL)
@pytest.mark.ci
def test_mysql_storage_types():
engine = sa.create_engine(os.environ["MYSQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/types.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("types").schema == {
"fields": [
{"name": "any", "type": "string"}, # type fallback
{"name": "array", "type": "string"}, # type fallback
{"name": "boolean", "type": "integer"}, # type downgrade
{"name": "date", "type": "date"},
{"name": "date_year", "type": "date"}, # format removal
{"name": "datetime", "type": "datetime"},
{"name": "duration", "type": "string"}, # type fallback
{"name": "geojson", "type": "string"}, # type fallback
{"name": "geopoint", "type": "string"}, # type fallback
{"name": "integer", "type": "integer"},
{"name": "number", "type": "number"},
{"name": "object", "type": "string"}, # type fallback
{"name": "string", "type": "string"},
{"name": "time", "type": "time"},
{"name": "year", "type": "integer"}, # type downgrade
{"name": "yearmonth", "type": "string"}, # type fallback
],
}
# Assert data
assert target.get_resource("types").read_rows() == [
{
"any": "中国人",
"array": '["Mike", "John"]',
"boolean": True,
"date": datetime.date(2015, 1, 1),
"date_year": datetime.date(2015, 1, 1),
"datetime": datetime.datetime(2015, 1, 1, 3, 0),
"duration": "P1Y1M",
"geojson": '{"type": "Point", "coordinates": [33, 33.33]}',
"geopoint": "30,70",
"integer": 1,
"number": 7,
"object": '{"chars": 560}',
"string": "english",
"time": datetime.time(3, 0),
"year": 2015,
"yearmonth": "2015-01",
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
# TODO: fix unique for MySQL
@pytest.mark.ci
@pytest.mark.skip
def test_mysql_storage_integrity():
engine = sa.create_engine(os.environ["MYSQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/integrity.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata (main)
assert target.get_resource("integrity_main").schema == {
"fields": [
# added required
{"name": "id", "type": "integer", "constraints": {"required": True}},
{"name": "parent", "type": "integer"},
{"name": "description", "type": "string"},
],
"primaryKey": ["id"],
"foreignKeys": [
{"fields": ["parent"], "reference": {"resource": "", "fields": ["id"]}}
],
}
# Assert metadata (link)
assert target.get_resource("integrity_link").schema == {
"fields": [
# added required
{"name": "main_id", "type": "integer", "constraints": {"required": True}},
# added required; removed unique
{"name": "some_id", "type": "integer", "constraints": {"required": True}},
# removed unique
{"name": "description", "type": "string"},
],
"primaryKey": ["main_id", "some_id"],
"foreignKeys": [
{
"fields": ["main_id"],
"reference": {"resource": "integrity_main", "fields": ["id"]},
}
],
}
# Assert data (main)
    assert target.get_resource("integrity_main").read_rows() == [
{"id": 1, "parent": None, "description": "english"},
{"id": 2, "parent": 1, "description": "中国人"},
]
# Assert data (link)
    assert target.get_resource("integrity_link").read_rows() == [
{"main_id": 1, "some_id": 1, "description": "note1"},
{"main_id": 2, "some_id": 2, "description": "note2"},
]
# Cleanup storage
storage.delete_package(target.resource_names)
# TODO: fix enum for MySQL
@pytest.mark.ci
@pytest.mark.skip
def test_mysql_storage_constraints():
engine = sa.create_engine(os.environ["MYSQL_URL"])
prefix = "prefix_"
# Export/Import
source = Package("data/storage/constraints.json")
storage = source.to_sql(engine=engine, prefix=prefix, force=True)
target = Package.from_sql(engine=engine, prefix=prefix)
# Assert metadata
assert target.get_resource("constraints").schema == {
"fields": [
{"name": "required", "type": "string", "constraints": {"required": True}},
{"name": "minLength", "type": "string"}, # constraint removal
{"name": "maxLength", "type": "string"}, # constraint removal
{"name": "pattern", "type": "string"}, # constraint removal
{"name": "enum", "type": "string"}, # constraint removal
{"name": "minimum", "type": "integer"}, # constraint removal
{"name": "maximum", "type": "integer"}, # constraint removal
],
}
# Assert data
assert target.get_resource("constraints").read_rows() == [
{
"required": "passing",
"minLength": "passing",
"maxLength": "passing",
"pattern": "passing",
"enum": "passing",
"minimum": 5,
"maximum": 5,
},
]
# Cleanup storage
storage.delete_package(target.resource_names)
# TODO: fix constraints for MySQL
@pytest.mark.ci
@pytest.mark.skip
@pytest.mark.parametrize(
"field_name, cell",
[
("required", ""),
("minLength", "bad"),
("maxLength", "badbadbad"),
("pattern", "bad"),
("enum", "bad"),
("minimum", 3),
("maximum", 9),
],
)
def test_mysql_storage_constraints_not_valid_error(field_name, cell):
engine = sa.create_engine(os.environ["MYSQL_URL"])
package = Package("data/storage/constraints.json")
resource = package.get_resource("constraints")
# We set an invalid cell to the data property
for index, field in enumerate(resource.schema.fields):
if field.name == field_name:
resource.data[1][index] = cell
# NOTE: should we wrap these exceptions?
with pytest.raises(sa.exc.IntegrityError):
resource.to_sql(engine=engine, force=True)
@pytest.mark.ci
def test_mysql_storage_views_support():
engine = sa.create_engine(os.environ["MYSQL_URL"])
engine.execute("DROP VIEW IF EXISTS data_view")
engine.execute("DROP TABLE IF EXISTS data")
engine.execute("CREATE TABLE data (id INTEGER PRIMARY KEY, name TEXT)")
engine.execute("INSERT INTO data VALUES (1, 'english'), (2, '中国人')")
engine.execute("CREATE VIEW data_view AS SELECT * FROM data")
storage = SqlStorage(engine=engine)
resource = storage.read_resource("data_view")
assert resource.schema == {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
}
assert resource.read_rows() == [
{"id": 1, "name": "english"},
{"id": 2, "name": "中国人"},
]
| 34.869806
| 88
| 0.564665
|
8c59205f1e2c9bb33249be3b7cf547fb5e6477d2
| 9,122
|
py
|
Python
|
src/extensions/COMMANDS/StatusCommand.py
|
LaudateCorpus1/python-redfish-utility
|
75dbcde6e4495c2369008fc4f6dd5f78edd305db
|
[
"Apache-2.0"
] | null | null | null |
src/extensions/COMMANDS/StatusCommand.py
|
LaudateCorpus1/python-redfish-utility
|
75dbcde6e4495c2369008fc4f6dd5f78edd305db
|
[
"Apache-2.0"
] | null | null | null |
src/extensions/COMMANDS/StatusCommand.py
|
LaudateCorpus1/python-redfish-utility
|
75dbcde6e4495c2369008fc4f6dd5f78edd305db
|
[
"Apache-2.0"
] | null | null | null |
###
# Copyright 2016-2021 Hewlett Packard Enterprise, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
""" Status Command for RDMC """
from functools import reduce
from redfish.ris.utils import merge_dict
from rdmc_helper import (
ReturnCodes,
InvalidCommandLineErrorOPTS,
Encryption,
NoCurrentSessionEstablished,
)
class StatusCommand:
"""Constructor"""
def __init__(self):
self.ident = {
"name": "status",
"usage": None,
"description": "Run to display all pending changes within"
" the currently\n\tselected type that need to be"
" committed\n\texample: status",
"summary": "Displays all pending changes within a selected type"
" that need to be committed.",
"aliases": [],
"auxcommands": ["SelectCommand"],
}
self.cmdbase = None
self.rdmc = None
self.auxcommands = dict()
def run(self, line, help_disp=False):
"""Main status worker function
:param line: command line input
:type line: string.
"""
if help_disp:
self.parser.print_help()
return ReturnCodes.SUCCESS
try:
(options, _) = self.rdmc.rdmc_parse_arglist(self, line)
except (InvalidCommandLineErrorOPTS, SystemExit):
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.statusvalidation(options)
contents = self.rdmc.app.status()
selector = self.rdmc.app.selector
if contents and options.json:
self.jsonout(contents)
elif contents:
self.outputpatches(contents, selector)
else:
self.rdmc.ui.printer("No changes found\n")
# Return code
return ReturnCodes.SUCCESS
def jsonout(self, contents):
"""Helper function to print json output of patches
:param contents: contents for the selection
:type contents: string.
"""
self.rdmc.ui.printer("Current changes found:\n")
createdict = lambda y, x: {x: y}
totdict = {}
for item in contents:
for keypath, value in item.items():
path = keypath.split("(")[1].strip("()")
cont = {}
totdict[path] = cont
for content in value:
val = (
["List Manipulation"]
if content["op"] == "move"
else [content["value"].strip("\"'")]
if len(content["value"])
else [""]
)
cont = reduce(
createdict,
reversed([path] + content["path"].strip("/").split("/") + val),
)
merge_dict(totdict, cont)
self.rdmc.ui.print_out_json(totdict)
def outputpatches(self, contents, selector):
"""Helper function for status for use in patches
:param contents: contents for the selection
:type contents: string.
:param selector: type selected
:type selector: string.
"""
self.rdmc.ui.printer("Current changes found:\n")
for item in contents:
moveoperation = ""
for key, value in item.items():
if selector and key.lower().startswith(selector.lower()):
self.rdmc.ui.printer("%s (Currently selected)\n" % key)
else:
self.rdmc.ui.printer("%s\n" % key)
for content in value:
try:
if content["op"] == "move":
moveoperation = "/".join(content["path"].split("/")[1:-1])
continue
except:
if content[0]["op"] == "move":
moveoperation = "/".join(
content[0]["path"].split("/")[1:-1]
)
continue
try:
if isinstance(content[0]["value"], int):
self.rdmc.ui.printer(
"\t%s=%s"
% (content[0]["path"][1:], content[0]["value"])
)
elif (
not isinstance(content[0]["value"], bool)
and not len(content[0]["value"]) == 0
):
if (
content[0]["value"][0] == '"'
and content[0]["value"][-1] == '"'
):
self.rdmc.ui.printer(
"\t%s=%s"
% (
content[0]["path"][1:],
content[0]["value"][1:-1],
)
)
else:
self.rdmc.ui.printer(
"\t%s=%s"
% (content[0]["path"][1:], content[0]["value"])
)
else:
output = content[0]["value"]
if not isinstance(output, bool):
if len(output) == 0:
output = '""'
self.rdmc.ui.printer(
"\t%s=%s" % (content[0]["path"][1:], output)
)
except:
if isinstance(content["value"], int):
self.rdmc.ui.printer(
"\t%s=%s" % (content["path"][1:], content["value"])
)
elif (
content["value"]
and not isinstance(content["value"], bool)
and not len(content["value"]) == 0
):
if (
content["value"][0] == '"'
and content["value"][-1] == '"'
):
self.rdmc.ui.printer(
"\t%s=%s" % (content["path"][1:], content["value"])
)
else:
self.rdmc.ui.printer(
"\t%s=%s" % (content["path"][1:], content["value"])
)
else:
output = content["value"]
if output and not isinstance(output, bool):
if len(output) == 0:
output = '""'
self.rdmc.ui.printer(
"\t%s=%s" % (content["path"][1:], output)
)
self.rdmc.ui.printer("\n")
if moveoperation:
self.rdmc.ui.printer("\t%s=List Manipulation\n" % moveoperation)
def statusvalidation(self, options):
"""Status method validation function"""
self.cmdbase.login_select_validation(self, options)
def definearguments(self, customparser):
"""Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
self.cmdbase.add_login_arguments_group(customparser)
customparser.add_argument(
"-j",
"--json",
dest="json",
action="store_true",
help="Optionally include this flag if you wish to change the"
" displayed output to JSON format. Preserving the JSON data"
" structure makes the information easier to parse.",
default=False,
)
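# Illustrative sketch (not part of the command above): jsonout folds JSON-patch
# style paths such as "/Attributes/AdminName" into nested dicts with reduce
# before printing. The same folding idea in isolation; the names below are
# hypothetical.
from functools import reduce
def _fold_patch(path_type, patch_path, value):
    parts = [path_type] + patch_path.strip("/").split("/") + [value]
    # wrap the value in one dict per path segment, innermost first
    return reduce(lambda acc, key: {key: acc}, reversed(parts))
print(_fold_patch("Bios.v1_0_0", "/Attributes/AdminName", "jane"))
# -> {'Bios.v1_0_0': {'Attributes': {'AdminName': 'jane'}}}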
| 38.982906
| 88
| 0.420851
|
4f711d085fd511e0364d4f914c46337657e67239
| 3,551
|
py
|
Python
|
rplugin/python3/padawan_navigator/__init__.py
|
phux/padawan-navigator
|
3a509232bfe530369cc83d26213612ee38f535b8
|
[
"Unlicense"
] | 3
|
2017-11-27T03:28:13.000Z
|
2020-05-04T15:30:00.000Z
|
rplugin/python3/padawan_navigator/__init__.py
|
phux/padawan-navigator
|
3a509232bfe530369cc83d26213612ee38f535b8
|
[
"Unlicense"
] | null | null | null |
rplugin/python3/padawan_navigator/__init__.py
|
phux/padawan-navigator
|
3a509232bfe530369cc83d26213612ee38f535b8
|
[
"Unlicense"
] | null | null | null |
import neovim
from os import path
from urllib.error import URLError
from socket import timeout
import sys
import re
from padawan_navigator import server # noqa
@neovim.plugin
class PadawanNavigatorPlugin(object):
def __init__(self, vim):
self.vim = vim
self.current = vim.current
server_addr = self.vim.eval(
'g:padawan_navigator#server_addr')
server_command = self.vim.eval(
'g:padawan_navigator#server_command')
log_file = self.vim.eval(
'g:padawan_navigator#log_file')
self.server = server.Server(server_addr, server_command, log_file)
@neovim.function("PadawanGetParents", sync=True)
def padawangetparentclass_handler(self, args):
file_path = self.current.buffer.name
current_path = self.get_project_root(file_path)
[line_num, column_num] = self.current.window.cursor
contents = "\n".join(self.current.buffer)
params = {
'filepath': file_path.replace(current_path, ""),
'line': line_num,
'column': column_num,
'path': current_path,
'navigationtype': 'find-parents'
}
result = self.do_request('navigate', params, contents)
if not result or 'parents' not in result or not result['parents']:
self.vim.command("echom 'no parents found'")
else:
self.vim.command("call padawan_navigator#PopulateList(%s)" % result['parents'])
@neovim.function("PadawanGetImplementations", sync=True)
def padawangetimplementations_handler(self, args):
file_path = self.current.buffer.name
current_path = self.get_project_root(file_path)
[line_num, column_num] = self.current.window.cursor
contents = "\n".join(self.current.buffer)
params = {
'filepath': file_path.replace(current_path, ""),
'line': line_num,
'column': column_num,
'path': current_path,
'navigationtype': 'find-implementations'
}
result = self.do_request('navigate', params, contents)
if not result or 'children' not in result or not result['children']:
self.vim.command("echom 'no implementations found'")
else:
self.vim.command("call padawan_navigator#PopulateList(%s)" % result['children'])
def do_request(self, command, params, data=''):
try:
return self.server.sendRequest(command, params, data)
except URLError:
if self.vim.eval('g:padawan_navigator#server_autostart') == 1:
self.server.start()
self.vim.command(
"echom 'Padawan.php server started automatically'")
else:
self.vim.command("echom 'Padawan.php is not running'")
except timeout:
self.vim.command("echom 'Connection to padawan.php timed out'")
except ValueError as error:
self.vim.command("echom 'Padawan.php error: {}'".format(error))
        # any other error can bubble up to deoplete
return False
def get_project_root(self, file_path):
current_path = path.dirname(file_path)
while current_path != '/' and not path.exists(
path.join(current_path, 'composer.json')
):
current_path = path.dirname(current_path)
if current_path == '/':
current_path = path.dirname(file_path)
return current_path
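# Illustrative sketch: get_project_root above walks up from the current file
# until it finds a composer.json marker, falling back to the file's own
# directory. A standalone version of the same walk (paths are hypothetical):
from os import path
def find_project_root(file_path, marker="composer.json"):
    current = path.dirname(file_path)
    while current != "/" and not path.exists(path.join(current, marker)):
        current = path.dirname(current)
    return current if current != "/" else path.dirname(file_path)
# find_project_root("/home/me/app/src/Controller/Foo.php") -> "/home/me/app"
# provided /home/me/app/composer.json exists.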
| 36.608247
| 92
| 0.610532
|
40e33a38a5c5be25d709329cfeaf79ee56702bdc
| 7,117
|
py
|
Python
|
data/data_utils.py
|
BrancoLab/LocomotionControl
|
6dc16c29c13b31f6ad70af954a237e379ee10846
|
[
"MIT"
] | null | null | null |
data/data_utils.py
|
BrancoLab/LocomotionControl
|
6dc16c29c13b31f6ad70af954a237e379ee10846
|
[
"MIT"
] | 2
|
2020-11-23T16:32:11.000Z
|
2020-11-23T16:32:11.000Z
|
data/data_utils.py
|
BrancoLab/LocomotionControl
|
6dc16c29c13b31f6ad70af954a237e379ee10846
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from typing import Union, Tuple
from loguru import logger
from scipy import stats
from scipy.signal import medfilt
from fcutils.maths.signals import get_onset_offset
from data.debug_utils import plot_signal_and_events
KEYS = (
"x",
"y",
"segment",
"global_coord",
"speed",
"orientation",
"direction_of_movement",
"angular_velocity",
"spikes",
"firing_rate",
"dmov_velocity",
)
def register_in_time(trials, n_samples):
'''
Given a list of 1d numpy arrays of different length,
this function returns an array of shape (n_samples, n_trials) so
that each trial has the same number of samples and can thus be averaged
nicely
'''
target = np.zeros((n_samples, len(trials)))
for trial_n, trial in enumerate(trials):
n = len(trial)
for i in range(n_samples):
idx = int(np.floor(n * (i / n_samples)))
target[i, trial_n] = trial[idx]
return target
def remove_outlier_values(
data: np.ndarray,
threshold: Union[Tuple[float, float], float],
errors_calculation_array: np.ndarray = None,
) -> np.ndarray:
"""
    Removes extreme values from an array by setting them to NaN and interpolating what's left
"""
dtype = data.dtype
# find where extreme values are
if errors_calculation_array is None:
errors_calculation_array = data.copy()
if isinstance(threshold, tuple):
errors = np.where(
(errors_calculation_array > threshold[0])
& (errors_calculation_array < threshold[1])
)[0]
else:
errors = np.where(errors_calculation_array > threshold)[0]
data[errors - 1] = np.nan
data[errors] = np.nan
data = interpolate_nans(data=data)["data"]
return np.array(list(data.values())).astype(dtype)
def convolve_with_gaussian(
data: np.ndarray, kernel_width: int = 21
) -> np.ndarray:
"""
Convolves a 1D array with a gaussian kernel of given width
"""
# create kernel and normalize area under curve
norm = stats.norm(0, kernel_width)
X = np.linspace(norm.ppf(0.0001), norm.ppf(0.9999), kernel_width)
_kernel = norm.pdf(X)
kernel = _kernel / np.sum(_kernel)
padded = np.pad(data, 2*kernel_width, mode="edge")
return np.convolve(padded, kernel, mode="same")[2*kernel_width:-2*kernel_width]
def get_bouts_tracking_stacked(
tracking: Union[pd.Series, pd.DataFrame], bouts: pd.DataFrame
) -> pd.DataFrame:
"""
Creates a dataframe with the tracking data of each bout stacked
"""
columns = (
tracking.columns
if isinstance(tracking, pd.DataFrame)
else tracking.index
)
results = {k: [] for k in KEYS if k in columns}
for i, bout in bouts.iterrows():
for key in results.keys():
results[key].extend(
list(tracking[key][bout.start_frame : bout.end_frame])
)
return pd.DataFrame(results)
def pd_series_to_df(series: pd.Series) -> pd.DataFrame:
"""
Converts a series to a dataframe
"""
keys = [k for k in KEYS if k in list(series.index)]
return pd.DataFrame({k: series[k] for k in keys})
def get_event_times(
data: np.ndarray,
kernel_size: int = 71,
skip_first: int = 20 * 60,
th: float = 0.1,
abs_val: bool = False,
shift: int = 0,
debug: bool = False,
) -> Tuple[list, list]:
"""
    Given a 1D time series it gets all the times there's a new 'stimulus' (signal going > threshold).
"""
original = data.copy()
if abs_val:
data = np.abs(data)
if kernel_size is not None:
data = medfilt(data, kernel_size=kernel_size)
onsets, offsets = get_onset_offset(data, th=th)
onsets = [on for on in onsets if on > offsets[0]]
# skip onsets that occurred soon after the session start
onsets = [on - shift for on in onsets if on > skip_first]
offsets = [off - shift for off in offsets if off > onsets[0]]
# check
if len(onsets) != len(offsets):
raise ValueError("Error while getting event times")
for on, off in zip(onsets, offsets):
if on > off:
raise ValueError("Onsets cant be after offset")
if debug:
logger.debug(f"Found {len(onsets)} event times")
# visualize event times
plot_signal_and_events(
data, onsets, offsets, second_signal=original, show=True
)
return onsets, offsets
def bin_x_by_y(
data: Union[pd.DataFrame, pd.Series],
x: str,
y: str,
bins: Union[int, np.ndarray] = 10,
min_count: int = 0,
) -> Tuple[np.ndarray, float, float, int]:
"""
Bins the values in a column X of a dataframe by bins
specified based on the values of another column Y
"""
if isinstance(data, pd.Series):
data = pd_series_to_df(data)
# get bins
data["bins"], bins = pd.cut(data[y], bins=bins, retbins=True)
    data = data.loc[~data.bins.isna()]  # drop rows that fell outside the bins
bins_centers = (
bins[0] + np.cumsum(np.diff(bins)) - abs(np.diff(bins)[0] / 2)
)
# get values
mu = data.groupby("bins")[x].mean()
sigma = data.groupby("bins")[x].std()
counts = data.groupby("bins")[x].count()
# remove bins with values too low
mu[counts < min_count] = np.nan
sigma[counts < min_count] = np.nan
counts[counts < min_count] = np.nan
return bins_centers, mu, sigma, counts
def interpolate_nans(**entries) -> dict:
return (
pd.DataFrame(entries)
        .interpolate(axis=0, limit_direction="both")
.to_dict()
)
def select_by_indices(tracking: dict, selected_indices: np.ndarray) -> dict:
"""
    Given a dictionary of tracking data it selects data at given timestamps/indices
"""
tracking = tracking.copy()
for key in KEYS:
if key in tracking.keys():
tracking[key] = tracking[key][selected_indices]
return tracking
def downsample_tracking_data(tracking: dict, factor: int = 10) -> None:
    """
    Downsamples tracking data in place (by the given factor) to speed up plotting
    """
    for key in KEYS:
        if key in tracking.keys():
            tracking[key] = tracking[key][::factor]
def bin_tracking_data_by_arena_position(tracking: dict) -> pd.DataFrame:
"""
    Given a dictionary with tracking data from the hairpin,
including linearized tracking, it bins the tracking data by
the arena segment the mouse is in
"""
tracking_df = pd.DataFrame(
dict(
x=tracking["x"],
y=tracking["y"],
speed=tracking["speed"],
orientation=tracking["orientation"],
direction_of_movement=tracking["direction_of_movement"],
segment=tracking["segment"],
global_coord=tracking["global_coord"],
)
)
# get mean and std of each value
means = tracking_df.groupby("segment").mean()
stds = tracking_df.groupby("segment").std()
stds.columns = stds.columns.values + "_std"
return pd.concat([means, stds], axis=1)
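# Illustrative usage sketch (not part of the module above): trials of different
# lengths are resampled onto a common 100-sample axis by register_in_time so
# they can be averaged sample-by-sample.
import numpy as np
example_trials = [np.linspace(0, 1, n) for n in (80, 120, 95)]
aligned = register_in_time(example_trials, n_samples=100)  # shape (100, 3)
mean_trace = aligned.mean(axis=1)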
| 29.53112
| 106
| 0.627933
|
50fc61ebea570abfaaf60a69ebcf1da44827505f
| 52
|
py
|
Python
|
forms/__init__.py
|
kashifpk/pyckapps.visit_counter
|
e9769817b7259422307c382e0dea1ccb2cdd4d5f
|
[
"Apache-2.0"
] | 2
|
2015-01-11T22:23:58.000Z
|
2016-05-17T06:57:57.000Z
|
forms/__init__.py
|
kashifpk/pyckapps.visit_counter
|
e9769817b7259422307c382e0dea1ccb2cdd4d5f
|
[
"Apache-2.0"
] | 31
|
2015-01-14T11:30:50.000Z
|
2017-01-31T14:35:48.000Z
|
temp/newapp_scaffold/forms/__init__.py
|
kashifpk/PyCK
|
11513c6b928d37afcf83de717e8d2f74fce731af
|
[
"Ruby"
] | null | null | null |
#from myforms import MyForm
#__all__ = ['MyForm',]
| 13
| 27
| 0.692308
|
0082af62285df20ee8f30c9f1e5d00e2c62d6307
| 36
|
py
|
Python
|
7KYU/add_v2.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
7KYU/add_v2.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
7KYU/add_v2.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
add = lambda a: lambda b: a + b
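# Usage sketch: the curried lambda above is applied one argument at a time.
assert add(2)(3) == 5
add_five = add(5)           # partial application
assert add_five(10) == 15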
| 18
| 31
| 0.527778
|
433714211d30d91ff6c24b554868309de11fc025
| 786
|
py
|
Python
|
Python/StackMin.py
|
lywc20/daily-programming
|
78529e535aea5bda409e5a2a009274dca7011e29
|
[
"MIT"
] | null | null | null |
Python/StackMin.py
|
lywc20/daily-programming
|
78529e535aea5bda409e5a2a009274dca7011e29
|
[
"MIT"
] | null | null | null |
Python/StackMin.py
|
lywc20/daily-programming
|
78529e535aea5bda409e5a2a009274dca7011e29
|
[
"MIT"
] | null | null | null |
### Designing a stack with O(1) min, pop and push
class Node(object):
    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next_node = next_node
        self.min = None  # smallest value at or below this node once pushed
    def get_val(self):
        return self.data
    def get_next(self):
        return self.next_node
    def set_next(self, next_node):
        self.next_node = next_node
class Stack(object):
    def __init__(self, top=None):
        self.top = top
    def push(self, node):
        if self.top is None:
            self.top = node
            self.top.min = self.top.get_val()
        else:
            # carry the running minimum along with every pushed node
            if node.get_val() > self.top.min:
                node.min = self.top.min
            else:
                node.min = node.get_val()
            node.next_node = self.top
            self.top = node
    def pop(self):
        # O(1): unlink and return the top node
        node = self.top
        if node is not None:
            self.top = node.get_next()
        return node
    def get_min(self):
        # O(1): the top node always knows the current minimum
        return None if self.top is None else self.top.min
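# Usage sketch for the stack above: get_min stays O(1) because every node
# records the minimum of everything at or below it at push time.
s = Stack()
for value in (5, 3, 7, 2):
    s.push(Node(value))
print(s.get_min())  # 2
s.pop()
print(s.get_min())  # 3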
| 24.5625
| 48
| 0.529262
|
902f003207f59e2fe2b9c9f73c13ad6fd050607e
| 3,640
|
py
|
Python
|
scripts/artifacts/chromeDownloads.py
|
f0r3ns1cat0r/ALEAPP
|
cd7eb4e7fea6f70fbf336382eeec47fda91f61d3
|
[
"MIT"
] | 187
|
2020-02-22T23:35:32.000Z
|
2022-03-31T13:46:24.000Z
|
scripts/artifacts/chromeDownloads.py
|
f0r3ns1cat0r/ALEAPP
|
cd7eb4e7fea6f70fbf336382eeec47fda91f61d3
|
[
"MIT"
] | 65
|
2020-02-25T18:22:47.000Z
|
2022-03-27T21:41:21.000Z
|
scripts/artifacts/chromeDownloads.py
|
f0r3ns1cat0r/ALEAPP
|
cd7eb4e7fea6f70fbf336382eeec47fda91f61d3
|
[
"MIT"
] | 47
|
2020-02-24T22:33:35.000Z
|
2022-03-11T05:19:42.000Z
|
import os
import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, get_next_unused_name, does_column_exist_in_db, open_sqlite_db_readonly
def get_browser_name(file_name):
if 'microsoft' in file_name.lower():
return 'Edge'
elif 'chrome' in file_name.lower():
return 'Chrome'
elif 'opera' in file_name.lower():
return 'Opera'
else:
return 'Unknown'
def get_chromeDownloads(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if not os.path.basename(file_found) == 'History': # skip -journal and other files
continue
browser_name = get_browser_name(file_found)
if file_found.find('app_sbrowser') >= 0:
browser_name = 'Browser'
elif file_found.find('.magisk') >= 0 and file_found.find('mirror') >= 0:
continue # Skip sbin/.magisk/mirror/data/.. , it should be duplicate data??
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
# check for last_access_time column, an older version of chrome db (32) does not have it
        if does_column_exist_in_db(db, 'downloads', 'last_access_time'):
last_access_time_query = '''
CASE last_access_time
WHEN "0"
THEN ""
ELSE datetime(last_access_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
END AS "Last Access Time"'''
else:
last_access_time_query = "'' as last_access_query"
cursor.execute(f'''
SELECT
CASE start_time
WHEN "0"
THEN ""
ELSE datetime(start_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
END AS "Start Time",
CASE end_time
WHEN "0"
THEN ""
ELSE datetime(end_time / 1000000 + (strftime('%s', '1601-01-01')), "unixepoch")
END AS "End Time",
{last_access_time_query},
tab_url,
target_path, state, opened, received_bytes, total_bytes
FROM downloads
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport(f'{browser_name} Downloads')
#check for existing and get next name for report file, so report from another file does not get overwritten
report_path = os.path.join(report_folder, f'{browser_name} Downloads.temphtml')
report_path = get_next_unused_name(report_path)[:-9] # remove .temphtml
report.start_artifact_report(report_folder, os.path.basename(report_path))
report.add_script()
data_headers = ('Start Time','End Time','Last Access Time','URL','Target Path','State','Opened?','Received Bytes','Total Bytes')
data_list = []
for row in all_rows:
data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'{browser_name} Downloads'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'{browser_name} Downloads'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc(f'No {browser_name} download data available')
db.close()
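# Illustrative sketch: Chrome's History database stores timestamps as
# microseconds since 1601-01-01 (the WebKit epoch), which the SQL above
# converts via start_time/1000000 + strftime('%s', '1601-01-01'). The same
# conversion in pure Python, for a hypothetical raw value:
from datetime import datetime, timedelta
def webkit_to_datetime(webkit_us):
    return datetime(1601, 1, 1) + timedelta(microseconds=webkit_us)
# webkit_to_datetime(13_300_000_000_000_000) falls in mid-2022.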
| 40.898876
| 145
| 0.613462
|
a22a538527e81ed9317c1e43e3928643133fde0b
| 15,095
|
py
|
Python
|
polysquarecmakelinter/find_variables_in_scopes.py
|
polysquare/polysquare-cmake-linter
|
83f2f12d562b461f9d5bb700b0a17aa9f99751e0
|
[
"MIT"
] | 5
|
2016-08-15T15:25:53.000Z
|
2022-03-31T15:49:37.000Z
|
polysquarecmakelinter/find_variables_in_scopes.py
|
polysquare/polysquare-cmake-linter
|
83f2f12d562b461f9d5bb700b0a17aa9f99751e0
|
[
"MIT"
] | 6
|
2015-01-01T17:05:25.000Z
|
2018-02-01T02:31:09.000Z
|
polysquarecmakelinter/find_variables_in_scopes.py
|
polysquare/polysquare-cmake-linter
|
83f2f12d562b461f9d5bb700b0a17aa9f99751e0
|
[
"MIT"
] | 1
|
2021-01-06T17:32:09.000Z
|
2021-01-06T17:32:09.000Z
|
# /polysquarecmakelinter/find_variables_in_scopes.py
#
# Find all variables and order by scope. Scoping in CMake is a bit strange,
# by default, all variables will have function scope, or failing that, global
# scope. Any variable set by foreach as a loop variable has scope only within
# that loop. Macro arguments have scope within their respective macros and
# anything set inside of a foreach loop has scope inside of its
# enclosing function.
#
# These functions don't necessarily follow the same scoping structure that
# CMake uses internally - scoping in CMake is actually based on the
# runtime call tree, so if function A calls function B then whatever
# was visible in function A will also be visible in function B. However,
# setting that variable will cause it to be set in function B's scope.
#
# Instead, the model followed is "logical scope" - variables from function
# B called from function A are not visible in function A, except if function
# B was defined inside function A. The same rules apply for macros.
#
# In the author's opinion, checks that rely on CMake's own scoping behavior
# are really enforcing bad behavior - implicit variable visibility is bad
# for maintainability and shouldn't be relied on in any case.
#
# Global Scope:
# set (VARIABLE VALUE)
#
# macro (my_macro MACRO_ARG)
# MACRO_ARG has my_macro scope
# set (MY_MACRO_VAR VALUE) MY_MACRO_VAR has my_function scope because it
# was called by my_function
# endmacro ()
#
# function (my_other_function)
# set (PARENT_VAR VALUE PARENT_SCOPE) PARENT_VAR has my_function scope
# endfunction ()
#
# function (my_function ARGUMENT)
# ARGUMENT has my_function scope
# set (MY_FUNC_VAR VALUE) MY_FUNC_VAR has my_function scope
# foreach (LOOP_VAR ${MY_FUNC_VAR})
# LOOP_VAR has foreach scope
# set (VAR_SET_IN_LOOP VALUE) VAR_SET_IN_LOOP has my_function scope
# endforeach ()
# my_other_function ()
# my_macro ()
# endfunction ()
#
# See /LICENCE.md for Copyright information
"""Find all set variables and order by scope."""
import re
from collections import namedtuple
from polysquarecmakelinter import find_set_variables
Variable = namedtuple("Variable", "node source")
ScopeInfo = namedtuple("ScopeInfo", "name type")
_RE_VARIABLE_USE = re.compile(r"(?<![^\${])[0-9A-Za-z_]+(?![^]}])")
FOREACH_KEYWORDS = [
"IN",
"LISTS",
"ITEMS"
]
IF_KEYWORDS = [
"NOT",
"STREQUAL",
"STRLESS",
"STRGREATER",
"LESS",
"EQUAL",
"GREATER",
"VERSION_LESS",
"VERSION_EQUAL",
"VERSION_GREATER",
"EXISTS",
"COMMAND",
"POLICY",
"TARGET",
"IS_NEWER_THAN",
"IS_DIRECTORY",
"IS_ABSOLUTE",
"MATCHES",
"DEFINED",
"AND",
"OR",
"OFF",
"ON",
"TRUE",
"FALSE",
"YES",
"Y",
"NO",
"N",
"IGNORE",
"NOTFOUND"
]
# We use a class with constant variables here so that we can get int->int
# comparison. Comparing enums is slow because of the type lookup.
class VariableSource(object): # suppress(too-few-public-methods)
"""The source of a variable in a scope."""
ForeachVar = 0
MacroArg = 1
FunctionArg = 2
FunctionVar = 3
MacroVar = 4
GlobalVar = 5
class ScopeType(object): # suppress(too-few-public-methods)
"""The source of a variable in a scope."""
Foreach = 0
Macro = 1
Function = 2
Global = 3
class _Scope(object): # suppress(too-few-public-methods)
"""A place where variables are hoisted."""
def __init__(self, info, parent):
"""Initialize parent."""
super(_Scope, self).__init__()
self.parent = parent
self.info = info
self.scopes = []
def add_subscope(self, name, node, parent, factory):
"""Add a new subscope."""
assert node.__class__.__name__ != "ToplevelBody"
node_scope_types = {
"ForeachStatement": ScopeType.Foreach,
"MacroDefinition": ScopeType.Macro,
"FunctionDefinition": ScopeType.Function
}
assert node.__class__.__name__ in node_scope_types
scope_type = node_scope_types[node.__class__.__name__]
self.scopes.append(factory(ScopeInfo(name,
scope_type),
parent))
def traverse_scopes(abstract_syntax_tree, # NOQA
body_function_call,
header_function_call,
factory):
"""Find all set variables in tree and orders into scopes."""
global_scope = factory(ScopeInfo("toplevel", ScopeType.Global), None)
def _header_body_visitor(enclosing_scope, header_scope, header, body):
"""Visit a header-body like node."""
if header is not None:
header_function_call(header, enclosing_scope, header_scope)
for statement in body:
_node_recurse(statement, enclosing_scope, header_scope)
def _node_recurse(node, enclosing_scope, header_scope):
"""Visit any node, adjusts scopes."""
def _handle_if_block(node):
"""Handle if blocks."""
_header_body_visitor(enclosing_scope,
header_scope,
node.if_statement.header,
node.if_statement.body)
for elseif in node.elseif_statements:
_header_body_visitor(enclosing_scope,
header_scope,
elseif.header,
elseif.body)
if node.else_statement:
_header_body_visitor(enclosing_scope,
header_scope,
node.else_statement.header,
node.else_statement.body)
def _handle_foreach_statement(node):
"""Handle foreach statements."""
header_scope.add_subscope("foreach", node, header_scope, factory)
_header_body_visitor(enclosing_scope,
header_scope.scopes[-1],
node.header,
node.body)
def _handle_while_statement(node):
"""Handle while statements."""
_header_body_visitor(enclosing_scope,
header_scope,
node.header,
node.body)
def _handle_function_declaration(node):
"""Handle function declarations."""
header_scope.add_subscope(node.header.arguments[0].contents,
node,
header_scope,
factory)
_header_body_visitor(header_scope.scopes[-1],
header_scope.scopes[-1],
node.header,
node.body)
def _handle_macro_declaration(node):
"""Handle macro declarations."""
header_scope.add_subscope(node.header.arguments[0].contents,
node,
header_scope,
factory)
_header_body_visitor(header_scope.scopes[-1],
header_scope.scopes[-1],
node.header,
node.body)
def _handle_function_call(node):
"""Handle function calls - does nothing, nothing to recurse."""
body_function_call(node, enclosing_scope, header_scope)
def _handle_toplevel_body(node):
"""Handle the special toplevel body node."""
_header_body_visitor(enclosing_scope,
header_scope,
None,
node.statements)
node_dispatch = {
"IfBlock": _handle_if_block,
"ForeachStatement": _handle_foreach_statement,
"WhileStatement": _handle_while_statement,
"FunctionDefinition": _handle_function_declaration,
"MacroDefinition": _handle_macro_declaration,
"ToplevelBody": _handle_toplevel_body,
"FunctionCall": _handle_function_call
}
node_dispatch[node.__class__.__name__](node)
_node_recurse(abstract_syntax_tree, global_scope, global_scope)
return global_scope
def _scope_to_bind_var_to(function_call, enclosing):
"""Find a scope to bind variables set by function_call."""
if function_call.name == "set":
try:
if function_call.arguments[2].contents == "PARENT_SCOPE":
assert enclosing.parent is not None
enclosing = enclosing.parent
elif function_call.arguments[2].contents == "CACHE":
while enclosing.parent is not None:
enclosing = enclosing.parent
except IndexError: # suppress(pointless-except)
pass
# Another special case for set_property with GLOBAL as the
# first argument. Create a notional "variable"
elif function_call.name == "set_property":
assert len(function_call.arguments[0]) >= 3
if function_call.arguments[0].contents == "GLOBAL":
while enclosing.parent is not None:
enclosing = enclosing.parent
return enclosing
def set_in_tree(abstract_syntax_tree):
"""Find variables set by scopes."""
def scope_factory(info, parent):
"""Construct a "set variables" scope."""
class SetVariablesScope(_Scope): # suppress(too-few-public-methods)
"""Set variables in this scope."""
def __init__(self, info, parent):
"""Initialize set_vars member."""
super(SetVariablesScope, self).__init__(info, parent)
self.set_vars = []
return SetVariablesScope(info, parent)
def body_function_call(node, enclosing, body_header):
"""Handle function calls in a body and provides scope."""
del body_header
var_types = {
ScopeType.Macro: VariableSource.MacroVar,
ScopeType.Function: VariableSource.FunctionVar,
ScopeType.Global: VariableSource.GlobalVar
}
set_var = find_set_variables.by_function_call(node)
if set_var:
# Special case for "set" and PARENT_SCOPE/CACHE scope
enclosing = _scope_to_bind_var_to(node, enclosing)
info = enclosing.info
enclosing.set_vars.append(Variable(set_var,
var_types[info.type]))
def header_function_call(node, header_enclosing, header):
"""Handle the "header" function call and provides scope."""
del header_enclosing
def _get_header_vars(header):
"""Add variables implicitly set by header function call."""
header_variables = {
"foreach": lambda h: [h.arguments[0]],
"function": lambda h: h.arguments[1:],
"macro": lambda h: h.arguments[1:]
}
try:
return header_variables[header.name](header)
except KeyError:
return []
var_types = {
ScopeType.Foreach: VariableSource.ForeachVar,
ScopeType.Function: VariableSource.FunctionArg,
ScopeType.Macro: VariableSource.MacroArg
}
nodes = _get_header_vars(node)
info = header.info
header.set_vars += [Variable(v, var_types[info.type]) for v in nodes]
return traverse_scopes(abstract_syntax_tree,
body_function_call,
header_function_call,
scope_factory)
def used_in_tree(abstract_syntax_tree):
"""Find variables used in scopes."""
def scope_factory(info, parent):
"""Construct a "set variables" scope."""
class UsedVariablesScope(_Scope): # suppress(too-few-public-methods)
"""Used variables in this scope."""
def __init__(self, info, parent):
"""Initialize used_vars member."""
super(UsedVariablesScope, self).__init__(info, parent)
self.used_vars = []
return UsedVariablesScope(info, parent)
def body_function_call(node, body_enclosing, header):
"""Handle function calls in a node body."""
del body_enclosing
var_types = {
ScopeType.Foreach: VariableSource.ForeachVar,
ScopeType.Function: VariableSource.FunctionVar,
ScopeType.Macro: VariableSource.MacroVar,
ScopeType.Global: VariableSource.GlobalVar
}
info = header.info
header.used_vars.extend([Variable(a,
var_types[info.type])
for a in node.arguments
if _RE_VARIABLE_USE.search(a.contents)])
def header_function_call(node, header_enclosing, current_header):
"""Handle function calls in a node header."""
del header_enclosing
var_types = {
ScopeType.Foreach: lambda p: var_types[p.info.type](p.parent),
ScopeType.Function: lambda _: VariableSource.FunctionVar,
ScopeType.Macro: lambda _: VariableSource.MacroVar,
ScopeType.Global: lambda _: VariableSource.GlobalVar
}
starts_new_header = ["foreach", "function", "macro"]
if node.name in starts_new_header:
header = current_header.parent
else:
header = current_header
sct = header.info.type
kw_exclude = {
"if": IF_KEYWORDS,
"elseif": IF_KEYWORDS,
"while": IF_KEYWORDS,
"foreach": FOREACH_KEYWORDS,
"function": [],
"macro": [],
"else": []
}
header_pos_exclude = {
"if": lambda _: False,
"elseif": lambda _: False,
"while": lambda _: False,
"foreach": lambda n: n == 0,
"function": lambda _: True,
"macro": lambda _: True,
"else": lambda _: True
}
for index, argument in enumerate(node.arguments):
is_var_use = _RE_VARIABLE_USE.search(argument.contents) is not None
not_kw_excluded = argument.contents not in kw_exclude[node.name]
not_pos_excluded = not header_pos_exclude[node.name](index)
if is_var_use and not_kw_excluded and not_pos_excluded:
variable_type = var_types[sct](header.parent)
header.used_vars.append(Variable(argument, variable_type))
return traverse_scopes(abstract_syntax_tree,
body_function_call,
header_function_call,
scope_factory)
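# Illustrative sketch: _RE_VARIABLE_USE only matches an identifier when it is
# written as a CMake variable reference (or is the whole token), e.g. the FOO
# inside "${FOO}"; plain words in an argument do not count as variable uses.
import re
_pattern = re.compile(r"(?<![^\${])[0-9A-Za-z_]+(?![^]}])")
print(bool(_pattern.search("${FOO}")))         # True  - dereference
print(bool(_pattern.search("prefix_${FOO}")))  # True  - use embedded in a string
print(bool(_pattern.search("plain-text")))     # False - no dereference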
| 34.861432
| 79
| 0.585426
|
89aa1a222185d1b2aae3e086a9a34a9f6e1cd4e3
| 5,537
|
py
|
Python
|
keras/resnet/resnet_s56.py
|
rjuppa/vmmr
|
a968b869e00fe46ef862fe794b063318a66c894f
|
[
"MIT"
] | null | null | null |
keras/resnet/resnet_s56.py
|
rjuppa/vmmr
|
a968b869e00fe46ef862fe794b063318a66c894f
|
[
"MIT"
] | null | null | null |
keras/resnet/resnet_s56.py
|
rjuppa/vmmr
|
a968b869e00fe46ef862fe794b063318a66c894f
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import math, json, os, pickle, sys
import keras
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, LambdaCallback
from keras.layers import Dense, Dropout, Flatten, GlobalMaxPooling2D
from keras.models import Model, Sequential
from keras.optimizers import SGD, Adam, RMSprop
from keras.preprocessing.image import ImageDataGenerator
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# DATADIR = "/storage/plzen1/home/radekj/vmmr"
DATADIR = "/Users/radekj/devroot/vmmr"
name = "sample56"
result_path = os.path.join(DATADIR, 'results')
log_file = "{}/{}/{}_log.csv".format(result_path, name, name)
csv_logger = CSVLogger(log_file, append=True)
SIZE = (224, 224)
BATCH_SIZE = 32
EPOCH = 30
num_classes = 5
input_shape = (224, 224, 3)
def get_model():
model = keras.applications.resnet50.ResNet50(include_top=True, input_shape=input_shape)
return model
def save_history(history):
hist_file = "{}/{}/{}_log.json".format(result_path, name, name)
with open(hist_file, 'w') as file_pi:
file_pi.write(json.dumps(history.history))
def write_to_log(epoch, logs):
my_log_file = "{}/{}/{}_log.txt".format(result_path, name, name)
if not os.path.isfile(my_log_file):
with open(my_log_file, mode='a+') as f:
f.write("epoch, loss, acc, val_loss, val_acc\n")
with open(my_log_file, mode='a') as f:
f.write("{}, {}, {}, {}, {},\n".format(
epoch, logs['loss'], logs['acc'], logs['val_loss'], logs['val_acc']))
def train_vgg(folder):
DATA_DIR = folder
TRAIN_DIR = os.path.join(DATA_DIR, 'train')
VALID_DIR = os.path.join(DATA_DIR, 'valid')
TEST_DIR = os.path.join(DATA_DIR, 'test')
save_aug = os.path.join(DATA_DIR, 'tmp')
num_train_samples = sum([len(files) for r, d, files in os.walk(TRAIN_DIR)])
num_valid_samples = sum([len(files) for r, d, files in os.walk(VALID_DIR)])
num_train_steps = math.floor(num_train_samples / BATCH_SIZE)
num_valid_steps = math.floor(num_valid_samples / BATCH_SIZE)
shift = 0.05
train_gen = ImageDataGenerator(
width_shift_range=shift,
height_shift_range=shift,
horizontal_flip=False,
vertical_flip=True,
rotation_range=4,
zoom_range=0.1)
batches = train_gen.flow_from_directory(directory=TRAIN_DIR,
target_size=SIZE,
color_mode="rgb",
batch_size=BATCH_SIZE,
class_mode="categorical",
shuffle=True,
save_to_dir=save_aug)
val_gen = ImageDataGenerator()
val_batches = val_gen.flow_from_directory(directory=VALID_DIR,
target_size=SIZE,
color_mode="rgb",
batch_size=BATCH_SIZE,
class_mode="categorical",
shuffle=True)
model = get_model()
model.layers.pop()
model.layers.pop()
classes = list(iter(batches.class_indices))
# add last layer
x = model.layers[-1].output
x = GlobalMaxPooling2D()(x)
# x = Flatten()(last)
x = Dense(4096, activation="relu")(x)
x = Dense(1024, activation="relu")(x)
x = Dense(len(classes), activation="softmax")(x)
finetuned_model = Model(model.input, x)
finetuned_model.summary()
# opt = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
opt = Adam(lr=0.0001)
# opt = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
finetuned_model.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
for c in batches.class_indices:
classes[batches.class_indices[c]] = c
finetuned_model.classes = classes
early_stopping = EarlyStopping(patience=10)
my_log_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: write_to_log(epoch, logs),)
saved_model = "{}/{}/{}_best.h5".format(result_path, name, name)
check_pointer = ModelCheckpoint(saved_model, verbose=1, save_best_only=True)
print("batches.batch_size: {}".format(batches.batch_size))
print("num_valid_steps: {}".format(num_valid_steps))
print("num_train_steps: {}".format(num_train_steps))
history = finetuned_model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=EPOCH,
callbacks=[early_stopping, check_pointer, my_log_callback],
validation_data=val_batches, validation_steps=num_valid_steps)
save_history(history)
saved_model = "{}/{}/{}_final.h5".format(result_path, name, name)
model.save("{}_final.h5".format(saved_model))
if __name__ == '__main__':
"""
dataset_path: /Users/radekj/devroot/vmmr/datasets/sample5
/storage/plzen1/home/radekj/vmmr"
"""
print(len(sys.argv))
if len(sys.argv) < 2:
print("Need param: python train_vgg16.py dataset_path")
exit(1)
folder = str(sys.argv[1])
exists = os.path.isdir(folder)
if not exists:
print("Folder '{}' not found.".format(folder))
exit(1)
print("===== name: {}".format(name))
print("===== folder: {}".format(folder))
train_vgg(folder)
print("===== end.")
| 36.668874
| 99
| 0.611161
|
39a11f844a67ccdae0c884f92ae57301d5a91156
| 3,901
|
py
|
Python
|
algos/GLASSO/cuda_GISTA.py
|
GalSha/GLASSO_Framework
|
fb884d810b2aad2e80124d9d4cf5cd1aaf0d8688
|
[
"BSD-3-Clause"
] | null | null | null |
algos/GLASSO/cuda_GISTA.py
|
GalSha/GLASSO_Framework
|
fb884d810b2aad2e80124d9d4cf5cd1aaf0d8688
|
[
"BSD-3-Clause"
] | null | null | null |
algos/GLASSO/cuda_GISTA.py
|
GalSha/GLASSO_Framework
|
fb884d810b2aad2e80124d9d4cf5cd1aaf0d8688
|
[
"BSD-3-Clause"
] | null | null | null |
from numpy import inf
from numpy import linalg
from algos.GLASSO.base import base
from utils.common import cp_soft_threshold
from utils.GLASSO.glasso import cuda_objective_f_cholesky
class cuda_GISTA(base):
def __init__(self, T, N, lam, ls_iter, step_lim):
super(cuda_GISTA, self).__init__(T, N, lam)
self.ls_iter = ls_iter
self.step_lim = step_lim
self.save_name = "cuda_GISTA_N{N}_T{T}_LsIter{ls_iter}_StepLim{step_lim}" \
.format(N=self.N, T=self.T, ls_iter=self.ls_iter, step_lim=self.step_lim)
def compute(self, S, A0, status_f, history, test_check_f):
import cupy as cp
import cupyx
cupyx.seterr(linalg='raise')
S = cp.array(S, dtype='float32')
As = []
status = []
cp_step_lim = cp.float32(self.step_lim)
if A0 is None:
A_diag = self.lam * cp.ones(self.N, dtype='float32')
A_diag = A_diag + cp.diag(S)
A_diag = 1.0 / A_diag
A = cp.diag(A_diag)
A_diag = None
else:
A = cp.array(A0, dtype='float32')
if history:
As.append(cp.asnumpy(A))
if status_f is not None: status.append(status_f(A, cp.asarray(0.0)))
init_step = cp.asarray(1.0, dtype='float32')
A_inv = cp.linalg.inv(A)
for t in range(self.T):
if test_check_f is not None:
if test_check_f(A, S, self.lam, A_inv):
t -= 1
break
A_next, step = cuda_GISTA_linesearch(cp, A, S, self.lam, A_inv, max_iter=self.ls_iter, init_step=init_step, step_lim=cp_step_lim)
if step == 0:
init_step = 0
else:
A_next_inv = cp.linalg.inv(A_next)
A_next_A = A_next - A
init_step = cp.sum(cp.square(A_next_A, dtype='float32'), dtype='float32')
div_init_step = cp.trace((A_next_A) @ (A_inv - A_next_inv), dtype='float32')
A_next_A = None
if div_init_step != 0:
init_step /= div_init_step
else:
init_step = cp.asarray(0.0)
A = A_next
A_next = None
A_inv = A_next_inv
A_next_inv = None
if history:
As.append(cp.asnumpy(A))
if status_f is not None: status.append(status_f(A, step))
if init_step == 0: t = inf
return A, status, As, t+1
def objective_Q(cp, objective_f_value, A, D, A_next, step):
A_next_A = A_next - A
return objective_f_value + cp.trace(A_next_A @ D, dtype='float32') + (
0.5 / step) * (cp.sum(cp.square(A_next_A, dtype='float32'), dtype='float32'))
def cuda_GISTA_linesearch(cp, A, S, lam, A_inv, max_iter, init_step, step_lim):
if init_step == 0:
return A, cp.array(0.0)
step = init_step
D = S - A_inv
L = cp.linalg.cholesky(A)
init_F_value = cuda_objective_f_cholesky(cp, A,S,L)
L = None
for _ in range(max_iter):
if step < step_lim: break
try:
A_next = cp_soft_threshold(cp, A - step * D, step * lam)
A_next = A_next + cp.transpose(A_next)
A_next *= 0.5
L_next = cp.linalg.cholesky(A_next)
if cuda_objective_f_cholesky(cp, A_next, S, L_next) <= objective_Q(cp, init_F_value, A, D, A_next, step):
return A_next, step
except linalg.LinAlgError:
pass
step *= 0.5
step = cp.linalg.eigvalsh(A)[0] ** 2
A_next = cp_soft_threshold(cp, A - step * D, step * lam)
A_next = A_next + cp.transpose(A_next)
A_next *= 0.5
try:
# TODO: not SPD sometimes
L_next = cp.linalg.cholesky(A_next)
except linalg.LinAlgError:
step = 0.0
A_next = A
return A_next, cp.array(step)
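# Illustrative sketch (assumption): cp_soft_threshold, imported above from
# utils.common, is presumably the usual shrinkage operator used in the GISTA
# proximal step, sign(x) * max(|x| - t, 0). A NumPy stand-in for reference:
import numpy as np
def soft_threshold(x, t):
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)
# soft_threshold(np.array([-1.5, 0.2, 3.0]), 0.5) -> array([-1.,  0.,  2.5])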
| 34.830357
| 141
| 0.561907
|
70fbc8e2a17ec307f5572a9f0a9f8601361228f0
| 255
|
py
|
Python
|
oauth_client/manage.py
|
brollins90/dsOauth
|
fb1aeba08286854cdec9d1b3afede64404cce119
|
[
"MIT"
] | null | null | null |
oauth_client/manage.py
|
brollins90/dsOauth
|
fb1aeba08286854cdec9d1b3afede64404cce119
|
[
"MIT"
] | null | null | null |
oauth_client/manage.py
|
brollins90/dsOauth
|
fb1aeba08286854cdec9d1b3afede64404cce119
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oauth_client.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.181818
| 76
| 0.776471
|
fa2dc4b4f5f6ee2af16a54413dc7d846818c5466
| 3,181
|
py
|
Python
|
personalapp/personalapp/settings.py
|
mnithya/mypersonalapp
|
7a99ce9306107e66a88222bd876164ce813c1835
|
[
"MIT"
] | null | null | null |
personalapp/personalapp/settings.py
|
mnithya/mypersonalapp
|
7a99ce9306107e66a88222bd876164ce813c1835
|
[
"MIT"
] | null | null | null |
personalapp/personalapp/settings.py
|
mnithya/mypersonalapp
|
7a99ce9306107e66a88222bd876164ce813c1835
|
[
"MIT"
] | null | null | null |
"""
Django settings for personalapp project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ru^6vne^2s-emloc=2_89kre-%h=%u62+teodj_3#d!z=2)e=^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'personalapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'personalapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| 26.07377
| 91
| 0.700094
|
24e74e0acfc5c08cd264ca27698cd03dee80c38f
| 267
|
py
|
Python
|
exercicios/Lista1/Q21.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista1/Q21.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista1/Q21.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
# Read a mass value in pounds and display it converted to kilograms.
# The conversion formula is: K = L * 0.45,
# where K is the mass in kilograms and L is the mass in pounds.
l = float(input("Enter the mass in pounds: "))
k = l * 0.45
print(f"The weight converted to kg is {k}")
| 29.666667
| 74
| 0.719101
|
e3304d55e263566a2adac7ca3d08576c8498fa85
| 4,426
|
py
|
Python
|
extensions/adm/templates/5-todo/__init__.py
|
icoman/AppServer
|
b7715d90662e112638000b5a3c242fbcb59488a3
|
[
"MIT"
] | null | null | null |
extensions/adm/templates/5-todo/__init__.py
|
icoman/AppServer
|
b7715d90662e112638000b5a3c242fbcb59488a3
|
[
"MIT"
] | null | null | null |
extensions/adm/templates/5-todo/__init__.py
|
icoman/AppServer
|
b7715d90662e112638000b5a3c242fbcb59488a3
|
[
"MIT"
] | null | null | null |
#
# Sample Todo module
#
"""
Your license message ...
"""
import os, bottle, json, datetime
from appmodule import AppModule
from .modeldb import setupDB, Todo, MyS
class MyAppModule(AppModule):
def init(self):
DSN = self.module_config.get('DSN')
try:
setupDB(DSN)
except:
# ignore error here
pass
app = MyAppModule()
def getApp():
return app
@app.get('/static/<path:path>')
def _(path):
return bottle.static_file(path, root=app.module_static_folder)
@app.route('/')
@app.auth('access module')
@app.view('index.tpl')
def _():
'''
Default view
'''
bs = app.get_beaker_session()
user = bs.get('username')
if user:
title = 'Todo for {}'.format(user)
else:
title = 'Todo for Anonymous'
return dict(user=user, title=title)
@app.post('/list')
@app.auth('access module')
def _():
try:
with MyS() as session:
bs = app.get_beaker_session()
userid = bs.get('userid', 0)
filter = bottle.request.forms.filter
if filter:
q = session.query(Todo) \
.filter(Todo.title.like(u'%{}%'.format(filter)) | Todo.description.like(u'%{}%'.format(filter))) \
.order_by(Todo.id.asc())
else:
q = session.query(Todo).order_by(Todo.id.asc())
L = []
for i in q.all():
d = {'id': i.id, 'userid': i.userid,
'userfullname': i.userfullname, 'title': i.title,
'dataora': i.dataora.strftime("%d-%b-%Y %H:%M:%S"),
'description': i.description,
'done': i.done}
L.append(d)
ret = dict(ok=True, data=L, userid=userid)
except Exception as ex:
ret = dict(ok=False, data=str(ex))
return ret
@app.post('/add')
@app.auth('access module')
def _():
try:
with MyS() as session:
bs = app.get_beaker_session()
userfullname = bs.get('userfullname', u'Anonymous')
userid = bs.get('userid', 0)
ob = Todo()
ob.userid = userid
ob.userfullname = userfullname
data = json.loads(bottle.request.forms.data)
ob.dataora = datetime.datetime.now()
ob.title = data[0]
ob.description = data[1]
ob.done = (data[2] == 'yes')
session.add(ob)
session.commit() # ob.id is available after commit
obid = ob.id
ret = dict(ok=True, data=obid)
except Exception as ex:
ret = dict(ok=False, data=str(ex))
return ret
@app.post('/delete')
@app.auth('access module')
def _():
try:
with MyS() as session:
bs = app.get_beaker_session()
userid = bs.get('userid', 0)
todo_id = int(bottle.request.forms.get('id', 0))
ob = session.query(Todo).filter(Todo.id == todo_id).first()
if ob:
obid = ob.id
if userid == ob.userid:
session.delete(ob)
else:
return dict(ok=False, data='Access denied.')
else:
obid = 0
ret = dict(ok=True, data=obid)
except Exception as ex:
ret = dict(ok=False, data=str(ex))
return ret
@app.post('/update')
@app.auth('access module')
def _():
try:
with MyS() as session:
bs = app.get_beaker_session()
userfullname = bs.get('userfullname', u'Anonymous')
userid = bs.get('userid', 0)
todo_id = int(bottle.request.forms.get('id', 0))
data = json.loads(bottle.request.forms.data)
ob = session.query(Todo).filter(Todo.id == todo_id).first()
if ob:
obid = ob.id
if userid == ob.userid:
ob.userfullname = userfullname
ob.dataora = datetime.datetime.now()
ob.title = data[0]
ob.description = data[1]
ob.done = (data[2] == 'yes')
else:
return dict(ok=False, data='Access denied.')
else:
obid = 0
ret = dict(ok=True, data=obid)
except Exception as ex:
ret = dict(ok=False, data=str(ex))
return ret
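# Illustrative client sketch (not part of the module above): the /add endpoint
# reads a form field named "data" holding a JSON list
# [title, description, "yes"|"no"]. Host, port and mount point are hypothetical.
import json
import requests
payload = {"data": json.dumps(["Buy milk", "2 litres", "no"])}
resp = requests.post("http://localhost:8080/todo/add", data=payload)
print(resp.json())  # e.g. {"ok": true, "data": <new todo id>}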
| 28.012658
| 118
| 0.504745
|
a76a006508a345656b9a3d417222fcd850288638
| 6,009
|
py
|
Python
|
python/dgl/nn/pytorch/conv/agnnconv.py
|
gvvynplaine/dgl
|
6294677f8acc6bc040baf922910473e1c82995ba
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/nn/pytorch/conv/agnnconv.py
|
gvvynplaine/dgl
|
6294677f8acc6bc040baf922910473e1c82995ba
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/nn/pytorch/conv/agnnconv.py
|
gvvynplaine/dgl
|
6294677f8acc6bc040baf922910473e1c82995ba
|
[
"Apache-2.0"
] | null | null | null |
"""Torch Module for Attention-based Graph Neural Network layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch as th
from torch import nn
from torch.nn import functional as F
from .... import function as fn
from ....ops import edge_softmax
from ....base import DGLError
from ....utils import expand_as_pair
class AGNNConv(nn.Module):
r"""
Description
-----------
Attention-based Graph Neural Network layer from paper `Attention-based
Graph Neural Network for Semi-Supervised Learning
<https://arxiv.org/abs/1803.03735>`__.
.. math::
H^{l+1} = P H^{l}
where :math:`P` is computed as:
.. math::
P_{ij} = \mathrm{softmax}_i ( \beta \cdot \cos(h_i^l, h_j^l))
where :math:`\beta` is a single scalar parameter.
Parameters
----------
init_beta : float, optional
The :math:`\beta` in the formula, a single scalar parameter.
learn_beta : bool, optional
If True, :math:`\beta` will be learnable parameter.
allow_zero_in_degree : bool, optional
If there are 0-in-degree nodes in the graph, output for those nodes will be invalid
since no message will be passed to those nodes. This is harmful for some applications
causing silent performance regression. This module will raise a DGLError if it detects
0-in-degree nodes in input graph. By setting ``True``, it will suppress the check
and let the users handle it by themselves. Default: ``False``.
Notes
-----
    Zero in-degree nodes will lead to invalid output values. This is because no message
    will be passed to those nodes; the aggregation function will be applied to an empty input.
A common practice to avoid this is to add a self-loop for each node in the graph if
it is homogeneous, which can be achieved by:
>>> g = ... # a DGLGraph
>>> g = dgl.add_self_loop(g)
    Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graphs,
    since the edge type can not be decided for self-loop edges. Set ``allow_zero_in_degree``
    to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.
    A common practice to handle this is to filter out the zero-in-degree nodes when using
    the output of this conv layer.
Example
-------
>>> import dgl
>>> import numpy as np
>>> import torch as th
>>> from dgl.nn import AGNNConv
>>>
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> g = dgl.add_self_loop(g)
>>> feat = th.ones(6, 10)
>>> conv = AGNNConv()
>>> res = conv(g, feat)
>>> res
tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],
grad_fn=<BinaryReduceBackward>)
"""
def __init__(self,
init_beta=1.,
learn_beta=True,
allow_zero_in_degree=False):
super(AGNNConv, self).__init__()
self._allow_zero_in_degree = allow_zero_in_degree
if learn_beta:
self.beta = nn.Parameter(th.Tensor([init_beta]))
else:
self.register_buffer('beta', th.Tensor([init_beta]))
def forward(self, graph, feat):
r"""
Description
-----------
Compute AGNN layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor
The input feature of shape :math:`(N, *)` :math:`N` is the
number of nodes, and :math:`*` could be of any shape.
If a pair of torch.Tensor is given, the pair must contain two tensors of shape
:math:`(N_{in}, *)` and :math:`(N_{out}, *)`, the :math:`*` in the later
tensor must equal the previous one.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N, *)` where :math:`*`
should be the same as input shape.
Raises
------
DGLError
If there are 0-in-degree nodes in the input graph, it will raise DGLError
since no message will be passed to those nodes. This will cause invalid output.
The error can be ignored by setting ``allow_zero_in_degree`` parameter to ``True``.
"""
with graph.local_scope():
if not self._allow_zero_in_degree:
if (graph.in_degrees() == 0).any():
raise DGLError('There are 0-in-degree nodes in the graph, '
'output for those nodes will be invalid. '
'This is harmful for some applications, '
'causing silent performance regression. '
'Adding self-loop on the input graph by '
'calling `g = dgl.add_self_loop(g)` will resolve '
'the issue. Setting ``allow_zero_in_degree`` '
'to be `True` when constructing this module will '
'suppress the check and let the code run.')
feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = feat_src
graph.srcdata['norm_h'] = F.normalize(feat_src, p=2, dim=-1)
if isinstance(feat, tuple) or graph.is_block:
graph.dstdata['norm_h'] = F.normalize(feat_dst, p=2, dim=-1)
# compute cosine distance
graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos'))
cos = graph.edata.pop('cos')
e = self.beta * cos
graph.edata['p'] = edge_softmax(graph, e)
graph.update_all(fn.u_mul_e('h', 'p', 'm'), fn.sum('m', 'h'))
return graph.dstdata.pop('h')
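# Illustrative dense sketch (not DGL's implementation): on a small graph where
# every node attends to every node, the propagation matrix P is a row-softmax
# over beta * cos(h_i, h_j) and the layer output is P @ H. On a sparse graph
# the softmax runs only over actual in-edges, as edge_softmax does above.
import torch
import torch.nn.functional as F
H = torch.randn(4, 16)             # 4 nodes, 16-dim features
Hn = F.normalize(H, p=2, dim=-1)   # unit-norm rows
beta = 1.0
P = torch.softmax(beta * (Hn @ Hn.t()), dim=-1)
H_next = P @ H                     # attention-weighted neighbourhood average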
| 40.06
| 95
| 0.560825
|
99b1c097234f4402a0e4c4efbbab2058723dabe0
| 962
|
py
|
Python
|
.github/actions/pr-to-update-go/pr_to_update_go/__main__.py
|
hbeatty/incubator-trafficcontrol
|
13ed991531778c60298eb8f532b2a4862f7cb67b
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2021-03-07T06:11:30.000Z
|
2021-03-07T06:11:30.000Z
|
.github/actions/pr-to-update-go/pr_to_update_go/__main__.py
|
hbeatty/incubator-trafficcontrol
|
13ed991531778c60298eb8f532b2a4862f7cb67b
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2022-03-02T10:45:10.000Z
|
2022-03-02T10:45:10.000Z
|
.github/actions/pr-to-update-go/pr_to_update_go/__main__.py
|
hbeatty/incubator-trafficcontrol
|
13ed991531778c60298eb8f532b2a4862f7cb67b
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from github.MainClass import Github
from pr_to_update_go.go_pr_maker import GoPRMaker
from pr_to_update_go.constants import ENV_GITHUB_TOKEN
def main() -> None:
try:
github_token: str = os.environ[ENV_GITHUB_TOKEN]
except KeyError:
print(f'Environment variable {ENV_GITHUB_TOKEN} must be defined.')
sys.exit(1)
gh = Github(login_or_token=github_token)
GoPRMaker(gh).run()
main()
| 29.151515
| 74
| 0.77027
|
ea2e6d3b745c8a8d0805db680a800afb8487f777
| 3,259
|
py
|
Python
|
src/helpers/init_helper.py
|
wqliu657/DSNet
|
1804176e2e8b57846beb063667448982273fca89
|
[
"MIT"
] | 113
|
2020-12-04T21:27:34.000Z
|
2022-03-31T11:09:51.000Z
|
src/helpers/init_helper.py
|
wqliu657/DSNet
|
1804176e2e8b57846beb063667448982273fca89
|
[
"MIT"
] | 23
|
2021-02-26T15:15:36.000Z
|
2022-03-24T12:37:08.000Z
|
src/helpers/init_helper.py
|
wqliu657/DSNet
|
1804176e2e8b57846beb063667448982273fca89
|
[
"MIT"
] | 34
|
2020-12-19T08:38:29.000Z
|
2022-02-25T05:49:43.000Z
|
import argparse
import logging
import random
from pathlib import Path
import numpy as np
import torch
def set_random_seed(seed: int) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def init_logger(log_dir: str, log_file: str) -> None:
logger = logging.getLogger()
format_str = r'[%(asctime)s] %(message)s'
logging.basicConfig(
level=logging.INFO,
datefmt=r'%Y/%m/%d %H:%M:%S',
format=format_str
)
log_dir = Path(log_dir)
log_dir.mkdir(parents=True, exist_ok=True)
fh = logging.FileHandler(str(log_dir / log_file))
fh.setFormatter(logging.Formatter(format_str))
logger.addHandler(fh)
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# model type
parser.add_argument('model', type=str,
choices=('anchor-based', 'anchor-free'))
# training & evaluation
parser.add_argument('--device', type=str, default='cuda',
choices=('cuda', 'cpu'))
parser.add_argument('--seed', type=int, default=12345)
parser.add_argument('--splits', type=str, nargs='+', default=[])
parser.add_argument('--max-epoch', type=int, default=300)
parser.add_argument('--model-dir', type=str, default='../models/model')
parser.add_argument('--log-file', type=str, default='log.txt')
parser.add_argument('--lr', type=float, default=5e-5)
parser.add_argument('--weight-decay', type=float, default=1e-5)
parser.add_argument('--lambda-reg', type=float, default=1.0)
parser.add_argument('--nms-thresh', type=float, default=0.5)
# inference
parser.add_argument('--ckpt-path', type=str, default=None)
parser.add_argument('--sample-rate', type=int, default=15)
parser.add_argument('--source', type=str, default=None)
parser.add_argument('--save-path', type=str, default=None)
# common model config
parser.add_argument('--base-model', type=str, default='attention',
choices=['attention', 'lstm', 'linear', 'bilstm',
'gcn'])
parser.add_argument('--num-head', type=int, default=8)
parser.add_argument('--num-feature', type=int, default=1024)
parser.add_argument('--num-hidden', type=int, default=128)
# anchor based
parser.add_argument('--neg-sample-ratio', type=float, default=2.0)
parser.add_argument('--incomplete-sample-ratio', type=float, default=1.0)
parser.add_argument('--pos-iou-thresh', type=float, default=0.6)
parser.add_argument('--neg-iou-thresh', type=float, default=0.0)
parser.add_argument('--incomplete-iou-thresh', type=float, default=0.3)
parser.add_argument('--anchor-scales', type=int, nargs='+',
default=[4, 8, 16, 32])
# anchor free
parser.add_argument('--lambda-ctr', type=float, default=1.0)
parser.add_argument('--cls-loss', type=str, default='focal',
choices=['focal', 'cross-entropy'])
parser.add_argument('--reg-loss', type=str, default='soft-iou',
choices=['soft-iou', 'smooth-l1'])
return parser
def get_arguments() -> argparse.Namespace:
parser = get_parser()
args = parser.parse_args()
return args
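# Usage sketch: the parser above is normally driven from the command line, e.g.
#   python train.py anchor-based --splits ../splits/tvsum.yml --max-epoch 50
# (script name and split path are hypothetical), or programmatically:
example_args = get_parser().parse_args(["anchor-based", "--lr", "1e-4"])
# example_args.model == "anchor-based", example_args.lr == 0.0001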
| 37.034091
| 77
| 0.641608
|
1301902f1173642def2cbb021ca679a5f33efa76
| 3,684
|
py
|
Python
|
src/npc/careers4.py
|
Monarda/monarda_bot
|
2eb2fa9726a6eeee19b56dd3cb39fcfba744c0aa
|
[
"MIT"
] | null | null | null |
src/npc/careers4.py
|
Monarda/monarda_bot
|
2eb2fa9726a6eeee19b56dd3cb39fcfba744c0aa
|
[
"MIT"
] | null | null | null |
src/npc/careers4.py
|
Monarda/monarda_bot
|
2eb2fa9726a6eeee19b56dd3cb39fcfba744c0aa
|
[
"MIT"
] | null | null | null |
import json
import importlib.resources
from ... import data
with importlib.resources.open_text(data,'careers.json') as f:
_careers_data = json.load(f)
class Careers4:
# Load data about careers, talents and skills
def __init__(self):
pass
def __complex_stuff(self):
self._skills_to_careers = {}
self._talents_to_careers = {}
self._careers_by_class = dict()
self._careers_by_earning_skill = dict()
self._skills = set()
self._talents = set()
for careername in _careers_data:
earning_skill = _careers_data[careername]['earning skill']
if earning_skill in self._careers_by_earning_skill:
self._careers_by_earning_skill[earning_skill].append(careername)
else:
self._careers_by_earning_skill[earning_skill] = [careername]
for rank in range(1,5):
rankname = _careers_data[careername]['rank {}'.format(rank)]["name"]
# Careers which provide this skill
skills = _careers_data[careername]['rank {}'.format(rank)]['skills']
self._skills.update(skills)
for skill in skills:
value = {"careername":careername, "rank":rank, "rankname":rankname}
if skill in self._skills_to_careers:
self._skills_to_careers[skill].append( value)
else:
self._skills_to_careers[skill] = [value]
# Careers which provide this talent
talents = _careers_data[careername]['rank {}'.format(rank)]['talents']
self._talents.update(talents)
for talent in talents:
value = {"careername":careername, "rank":rank, "rankname":rankname}
if talent in self._talents_to_careers:
self._talents_to_careers[talent].append( value)
else:
self._talents_to_careers[talent] = [value]
# Careers by class
classname = _careers_data[careername]['class']
if classname in self._careers_by_class:
self._careers_by_class[classname].update([careername])
else:
self._careers_by_class[classname] = set([careername])
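    # Inferred sketch of the careers.json shape this parser expects; the
    # concrete names below are illustrative assumptions, only the keys are
    # taken from the accesses above:
    #
    #   {
    #     "Some Career": {
    #       "class": "Some Class",
    #       "earning skill": "Some Skill",
    #       "rank 1": {"name": "...", "skills": ["..."], "talents": ["..."]},
    #       "rank 2": {...}, "rank 3": {...}, "rank 4": {...}
    #     }
    #   }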
@property
def careers(self):
return list(_careers_data.keys())
@property
def career_levels(self):
career_levels = {}
for careername in self.careers:
for i in range (1,5):
career_level_name = self[careername][f'rank {i}']['name']
if career_level_name in career_levels:
career_levels[career_level_name].append(f'{careername} {i}')
else:
career_levels[career_level_name] = [f'{careername} {i}']
return career_levels
def __getitem__(self, key):
key = key.title()
return dict(_careers_data[key])
def provides_skills(self):
self.__complex_stuff()
return self._skills_to_careers
def provides_skill(self, skill):
self.__complex_stuff()
return self._skills_to_careers[skill]
def main():
c4 = Careers4()
print(c4.provides_skill('Sail (Any)'))
print(c4._talents_to_careers['Sprinter'])
    print(len(c4.careers), sorted(c4.careers))
for skill in sorted(c4._careers_by_earning_skill.keys()):
print('{:20s}: {}'.format(skill, ', '.join(c4._careers_by_earning_skill[skill])))
if __name__ == "__main__":
# execute only if run as a script
main()
| 34.754717
| 89
| 0.587676
|
8cdbde45a2bf3b396d871150b7723d333093c229
| 63,576
|
py
|
Python
|
antlir/subvol_utils.py
|
baioc/antlir
|
e3b47407b72c4aee835adf4e68fccd9abff457f2
|
[
"MIT"
] | null | null | null |
antlir/subvol_utils.py
|
baioc/antlir
|
e3b47407b72c4aee835adf4e68fccd9abff457f2
|
[
"MIT"
] | null | null | null |
antlir/subvol_utils.py
|
baioc/antlir
|
e3b47407b72c4aee835adf4e68fccd9abff457f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import os
import platform
import subprocess
import sys
import time
from contextlib import contextmanager, ExitStack
from typing import AnyStr, BinaryIO, Iterable, Iterator, Optional, TypeVar
from .artifacts_dir import find_artifacts_dir
from .btrfs_diff.freeze import DoNotFreeze
from .common import (
check_popen_returncode,
get_logger,
open_fd,
pipe,
run_stdout_to_err,
)
from .compiler.subvolume_on_disk import SubvolumeOnDisk
from .fs_utils import Path, temp_dir
from .loopback import BtrfsLoopbackVolume, MIN_CREATE_BYTES
from .loopback_opts_t import loopback_opts_t
from .unshare import Namespace, Unshare, nsenter_as_root, nsenter_as_user
log = get_logger()
KiB = 2 ** 10
MiB = 2 ** 20
# Exposed as a helper so that test_compiler.py can mock it.
def _path_is_btrfs_subvol(path: Path) -> bool:
"Ensure that there is a btrfs subvolume at this path. As per @kdave at"
"https://stackoverflow.com/a/32865333"
# You'd think I could just `os.statvfs`, but no, not until Py3.7
# https://bugs.python.org/issue32143
fs_type = subprocess.run(
["stat", "-f", "--format=%T", path], stdout=subprocess.PIPE, text=True
).stdout.strip()
ino = os.stat(path).st_ino
return fs_type == "btrfs" and ino == 256
T = TypeVar("T")
# This is IMPORTANT. It does not just allow us to slightly reduce the waste
# factor, but also avoids having to handle `btrfs send` getting SIGPIPE in
# certain out-of-disk contexts.
def _drain_pipe_return_byte_count(f: BinaryIO) -> int:
# This would be better with the `splice` syscall, but that's too much work.
chunk_size = 2 ** 19 # limit RAM usage
total = 0
while True:
num_read = len(f.read(chunk_size))
if not num_read:
return total
total += num_read
# HACK ALERT: `Subvol.delete()` removes subvolumes nested inside it. Some
# of these may also be tracked as `Subvol` objects. In this scenario,
# we have to update `._exists` for the nested `Subvol`s. This global
# registry contains all the **created and not deleted** `Subvol`s known
# to the current program.
#
# This design is emphatically not thread-safe etc. It also leaks any
# `Subvol` objects that are destroyed without deleting the underlying
# subvolume.
_UUID_TO_SUBVOLS = {}
def _mark_deleted(uuid: str):
"Mark all the clones of this `Subvol` as deleted. Ignores unknown UUIDs."
subvols = _UUID_TO_SUBVOLS.get(uuid)
if not subvols:
# This happens if we are deleting a subvolume created outside of the
# Antlir compiler, which is nested in a `Subvol`.
return
for sv in subvols:
# Not checking that `._path` agrees because that check would
# take work to make non-fragile.
assert uuid == sv._uuid, (uuid, sv._uuid, sv._path)
sv._USE_mark_created_deleted_INSTEAD_exists = False
sv._uuid = None
del _UUID_TO_SUBVOLS[uuid]
def _query_uuid(subvol: "Subvol", path: Path):
res = subvol.run_as_root(
["btrfs", "subvolume", "show", path], stdout=subprocess.PIPE
)
res.check_returncode()
subvol_metadata = res.stdout.split(b"\n", 3)
# /
# Name: <FS_TREE>
# UUID: 15a88f92-4185-47c9-8048-f065a159f119
# Parent UUID: -
# Received UUID: -
# Creation time: 2020-09-30 09:36:02 -0700
# Subvolume ID: 5
# Generation: 2045967
# Gen at creation: 0
# Parent ID: 0
# Top level ID: 0
# Flags: -
# Snapshot(s):
return subvol_metadata[2].split(b":")[1].decode().strip()
# Subvol is marked as `DoNotFreeze` because its hash is just the byte
# string that contains the path to the subvol. Its member variables are
# just a cache of the external state of the subvol and do not affect its
# hash.
class Subvol(DoNotFreeze):
"""
## What is this for?
This class is to be a privilege / abstraction boundary that allows
regular, unprivileged Python code to construct images. Many btrfs
ioctls require CAP_SYS_ADMIN (or some kind of analog -- e.g. a
`libguestfs` VM or a privileged server for performing image operations).
Furthermore, writes to the image-under-construction may require similar
sorts of privileges to manipulate the image-under-construction as uid 0.
One approach would be to eschew privilege boundaries, and to run the
entire build process as `root`. However, that would forever confine our
build tool to be run in VMs and other tightly isolated contexts. Since
unprivileged image construction is technically possible, we will instead
take the approach that -- as much as possible -- the build code runs
unprivileged, as the repo-owning user, and only manipulates the
filesystem-under-construction via this one class.
For now, this means shelling out via `sudo`, but in the future,
`libguestfs` or a privileged filesystem construction proxy could be
swapped in with minimal changes to the overall structure.
## Usage
- Think of `Subvol` as a ticket to operate on a btrfs subvolume that
exists, or is about to be created, at a known path on disk. This
convention lets us cleanly describe paths on a subvolume that does not
yet physically exist.
- Call the functions from the btrfs section to manage the subvolumes.
- Call `subvol.run_as_root()` to use shell commands to manipulate the
image under construction.
- Call `subvol.path('image/relative/path')` to refer to paths inside the
subvolume e.g. in arguments to the `subvol.run_*` functions.
"""
def __init__(
self,
path: AnyStr,
*,
already_exists=False,
_test_only_allow_existing=False,
):
"""
`Subvol` can represent not-yet-created (or created-and-deleted)
subvolumes. Unless already_exists=True, you must call create() or
snapshot() to actually make the subvolume.
WATCH OUT: Because of `_UUID_TO_SUBVOLS`, all `Subvol` objects in the
"exists" state (created, snapshotted, initialized with
`already_exists=True`, etc) will **leak** if the owning code loses
the last reference without deleting the underlying subvol.
This is OK for now since we don't store any expensive / mutexed
resources here. However, if this ever changes, we may need to play
difficult games with `weakref` to avoid leaking those resources.
"""
self._path = Path(path).abspath()
self._USE_mark_created_deleted_INSTEAD_exists = False
self._uuid = None
if already_exists:
if not _path_is_btrfs_subvol(self._path):
raise AssertionError(f"No btrfs subvol at {self._path}")
self._mark_created()
elif not _test_only_allow_existing:
assert not os.path.exists(self._path), self._path
# This is read-only because any writes bypassing the `_mark*` functions
# would violate our internal invariants.
@property
def _exists(self):
return self._USE_mark_created_deleted_INSTEAD_exists
def _mark_created(self):
assert not self._exists and not self._uuid, (self._path, self._uuid)
self._USE_mark_created_deleted_INSTEAD_exists = True
# The UUID is valid only while `._exists == True`
self._uuid = _query_uuid(self, self.path())
        # This is not a set because our `hash()` is based on just `._path` and
# we really care about object identity here.
_UUID_TO_SUBVOLS.setdefault(self._uuid, []).append(self)
def _mark_deleted(self):
assert self._exists and self._uuid, self._path
assert any(
# `_mark_deleted()` will ignore unknown UUIDs, but ours must be
# known since we are not deleted.
(self is sv)
for sv in _UUID_TO_SUBVOLS.get(self._uuid, [])
), (self._uuid, self._path)
_mark_deleted(self._uuid)
def __eq__(self, other: "Subvol") -> bool:
assert self._exists == other._exists, self.path()
equal = self._path == other._path
assert not equal or self._uuid == other._uuid, (
self._path,
self._uuid,
other._uuid,
)
return equal
# `__hash__` contains only `_path`. The member variables
# of `Subvol` are just a cache of the external state of the subvol.
def __hash__(self) -> int:
return hash(self._path)
def path(
self, path_in_subvol: AnyStr = b".", *, no_dereference_leaf=False
) -> Path:
"""
The only safe way to access paths inside the subvolume. Do NOT
`os.path.join(subvol.path('a/path'), 'more/path')`, since that skips
crucial safety checks. Instead: `subvol.path(os.path.join(...))`.
See the `Path.ensure_child` doc for more details.
"""
# It's important that this is normalized. The `btrfs` CLI is not
# very flexible, so it will try to name a subvol '.' if we do not
# normalize `/subvol/.`.
return self._path.normalized_subpath(
path_in_subvol, no_dereference_leaf=no_dereference_leaf
)
def canonicalize_path(self, path: AnyStr) -> Path:
"""
IMPORTANT: At present, this will silently fail to resolve symlinks
        in the image that are not traversable by the repo user. This means
it's really only appropriate for paths that are known to be
world-readable, e.g. repo snapshot stuff in `__antlir__`.
The analog of `os.path.realpath()` taking an in-subvolume path
(subvol-absolute or relative to subvol root) and returning a
subvolume-absolute path.
Due to a limitation of `path()` this will currently fail on any
components that are absolute symlinks, but there's no strong
incentive to do the more complex correct implementation (yet).
"""
assert self._exists, f"{self.path()} does not exist"
root = self.path().realpath()
rel = self.path(path).realpath()
if rel == root:
return Path("/")
assert rel.startswith(root + b"/"), (rel, root)
return Path("/") / rel.relpath(root)
# This differs from the regular `subprocess.Popen` interface in these ways:
# - stdout maps to stderr by default (to protect the caller's stdout),
    # - `check` is supported, and defaults to `True`,
# - `cwd` is prohibited.
#
    # `_subvol_exists` is a private kwarg letting us use `run_as_root` to create
# new subvolumes, and not just touch existing ones.
@contextmanager
def popen_as_root(
self, args, *, _subvol_exists=True, stdout=None, check=True, **kwargs
):
if "cwd" in kwargs:
raise AssertionError(
"cwd= is not permitted as an argument to run_as_root, "
"because that makes it too easy to accidentally traverse "
"a symlink from inside the container and touch the host "
"filesystem. Best practice: wrap your path with "
"Subvol.path() as close as possible to its site of use."
)
if "pass_fds" in kwargs:
# Future: if you add support for this, see the note on how to
# improve `receive`, too.
#
# Why doesn't `pass_fds` just work? `sudo` closes all FDs in a
# (likely misguided) attempt to improve security. `sudo -C` can
# help here, but it's disabled by default.
raise NotImplementedError( # pragma: no cover
"But there is a straightforward fix -- you would need to "
"move the usage of our FD-passing wrapper from "
"nspawn_in_subvol.py to this function."
)
if _subvol_exists != self._exists:
raise AssertionError(
f"{self.path()} exists is {self._exists}, not {_subvol_exists}"
)
# Ban our subcommands from writing to stdout, since many of our
# tools (e.g. make-demo-sendstream, compiler) write structured
# data to stdout to be usable in pipelines.
if stdout is None:
stdout = 2
# The '--' is to avoid `args` from accidentally being parsed as
# environment variables or `sudo` options.
with subprocess.Popen(
# Clobber any pre-existing `TMP` because in the context of Buck,
# this is often set to something inside the repo's `buck-out`
# (as documented in https://buck.build/rule/genrule.html).
# Using the in-repo temporary directory causes a variety of
# issues, including (i) `yum` leaking root-owned files into
# `buck-out`, breaking `buck clean`, and (ii) `systemd-nspawn`
# bugging out with "Failed to create inaccessible file node"
# when we use `--bind-repo-ro`.
["sudo", "TMP=", "--", *args],
stdout=stdout,
**kwargs,
) as pr:
yield pr
if check:
check_popen_returncode(pr)
def run_as_root(
self,
args,
timeout=None,
input=None,
_subvol_exists=True,
check=True,
**kwargs,
):
"""
Run a command against an image. IMPORTANT: You MUST wrap all image
paths with `Subvol.path`, see that function's docblock.
Mostly API-compatible with subprocess.run, except that:
- `check` defaults to True instead of False,
- `stdout` is redirected to stderr by default,
- `cwd` is prohibited.
"""
# IMPORTANT: Any logic that CAN go in popen_as_root, MUST go there.
if input:
assert "stdin" not in kwargs
kwargs["stdin"] = subprocess.PIPE
with self.popen_as_root(
args, _subvol_exists=_subvol_exists, check=check, **kwargs
) as proc:
stdout, stderr = proc.communicate(timeout=timeout, input=input)
return subprocess.CompletedProcess(
args=proc.args,
returncode=proc.returncode,
stdout=stdout,
stderr=stderr,
)
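    # Example of the path-wrapping contract described above (illustrative,
    # not from the original source): always pass image paths through
    # `self.path(...)` rather than joining strings yourself.
    #
    #   subvol.run_as_root(["chmod", "0755", subvol.path("usr/bin/tool")])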
# Future: run_in_image()
# From here on out, every public method directly maps to the btrfs API.
# For now, we shell out, but in the future, we may talk to a privileged
# `btrfsutil` helper, or use `guestfs`.
def create(self) -> "Subvol":
self.run_as_root(
["btrfs", "subvolume", "create", self.path()], _subvol_exists=False
)
self._mark_created()
return self
@contextmanager
def maybe_create_externally(self) -> Iterator[None]:
assert not self._exists, self._path
assert not os.path.exists(self._path), self._path
try:
yield
finally:
if os.path.exists(self._path):
self._mark_created()
def snapshot(self, source: "Subvol") -> "Subvol":
# Since `snapshot` has awkward semantics around the `dest`,
# `_subvol_exists` won't be enough and we ought to ensure that the
# path physically does not exist. This needs to run as root, since
# `os.path.exists` may not have the right permissions.
self.run_as_root(["test", "!", "-e", self.path()], _subvol_exists=False)
self.run_as_root(
["btrfs", "subvolume", "snapshot", source.path(), self.path()],
_subvol_exists=False,
)
self._mark_created()
return self
@contextmanager
def delete_on_exit(self) -> Iterator["Subvol"]:
"Delete the subvol if it exists when exiting the context."
try:
yield self
finally:
if self._exists:
self.delete()
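    # Illustrative sketch of the cleanup pattern (not from the original
    # source); `scratch_path` is hypothetical. Create inside the context so
    # the subvol is deleted even if later steps raise.
    #
    #   with Subvol(scratch_path).delete_on_exit() as sv:
    #       sv.create()
    #       ...  # use sv; it is deleted on exit if it still exists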
def delete(self):
"""
This will delete the subvol AND all nested/inner subvolumes that
exist underneath this subvol.
        This fails if the `Subvol` does not exist, because explicit deletion
        in normal business logic can safely assume that the `Subvol` was
        already created. This is a built-in correctness check.
For "cleanup" logic, check out `delete_on_exit`.
"""
assert self._exists, self._path
# Set RW from the outermost to the innermost
subvols = list(self._gen_inner_subvol_paths())
self.set_readonly(False)
for inner_path in sorted(subvols):
assert _path_is_btrfs_subvol(inner_path), inner_path
self.run_as_root(
["btrfs", "property", "set", "-ts", inner_path, "ro", "false"]
)
# Delete from the innermost to the outermost
for inner_path in sorted(subvols, reverse=True):
uuid = _query_uuid(self, inner_path)
self.run_as_root(["btrfs", "subvolume", "delete", inner_path])
# Will succeed even if this subvolume was created by a
# subcommand, and is not tracked in `_UUID_TO_SUBVOLS`
_mark_deleted(uuid)
self.run_as_root(["btrfs", "subvolume", "delete", self.path()])
self._mark_deleted()
def _gen_inner_subvol_paths(self) -> Iterable[Path]:
"""
Implementation detail for `delete`.
        The intent of the code below is to make as many assertions as
        possible, to avoid accidentally deleting a subvolume that is not a
        descendant of `self`. Additionally,
this gets some implicit safeguards from other `Subvol` methods.
- `.path` checks the inner subvol paths to ensure they're not
traversing symlinks to go outside of the subvol.
- The fact that `Subvol` exists means that we checked that it's a
subvolume at construction time -- this is important since `btrfs
subvol list -o` returns bad results for non-subvolume paths.
Moreover, our `btrfs subvol show` reconfirms it.
"""
# `btrfs subvol {show,list}` both use the subvolume's path relative
# to the volume root.
my_rel_to_vol_root, _ = self.run_as_root(
["btrfs", "subvolume", "show", self.path()], stdout=subprocess.PIPE
).stdout.split(b"\n", 1)
my_path = self.path()
# NB: The below will fire if `Subvol` is the root subvol, since its
# relative path is `/`. However, that's not a case we need to
# support in any foreseeable future, and it would also require
# special-casing in the prefix search logic.
assert not my_rel_to_vol_root.startswith(b"/"), my_rel_to_vol_root
# Depending on how this subvolume has been mounted and is being used
# the interaction between the `btrfs subvolume show` path (the first
# line of `btrfs subvolume show` is what we care about) and this
        # subvolume path (`self.path()`) is different. The cases we have to
        # solve for, as they relate to inner subvolumes, are:
# - This subvolume is used as the "root" subvol for a container
# and inner subvols are created within that container.
# This is what happens with `nspawn_in_subvol`, ie: as part of an
# `image_*_unittest`, `image.genrule`, or via a `=container`
# `buck run` target. In this case the btrfs volume is mounted
# using a `subvol=` mount option, resulting in the mount "seeing"
# only the contents of the selected subvol.
# - This subvol is used on the *host* machine (where `buck` runs)
# and inner subvols are created. This is the standard case for
# `*_unittest` targets since those are executed in the host context.
# In this case the btrfs volume is mounted such that the `FS_TREE`
# subvol (id=5) is used resulting in the mount "seeing" *all*
# of the subvols contained within the volume.
# In this case the output of `btrfs subvolume show` looks something
# like this (taken from running the `:test-subvol-utils` test):
#
# tmp/delete_recursiveo7x56sn2/outer
# Name: outer
# UUID: aa2d8590-ba00-8a45-aee2-c1553f3dd292
# Parent UUID: -
# Received UUID: -
# Creation time: 2021-05-18 08:07:17 -0700
# Subvolume ID: 323
# Generation: 92
# Gen at creation: 89
# Parent ID: 5
# Top level ID: 5
# Flags: -
# Snapshot(s):
# and `my_path` looks something like this:
# /data/users/lsalis/fbsource/fbcode/buck-image-out/volume/tmp/delete_recursiveo7x56sn2/outer # noqa: E501
vol_mounted_at_fstree = my_path.endswith(b"/" + my_rel_to_vol_root)
# In this case the output of `btrfs subvolume show` looks something
# like this (taken from running the `:test-subvol-utils-inner` test):
#
#
# tmp/TempSubvolumes_wk81xmx0/test-subvol-utils-inner__test_layer:Jb__IyU.HzvZ.p73f/delete_recursiveotwxda64/outer # noqa: E501
# Name: outer
# UUID: 76866b7c-c4cc-1d4b-bafa-6aa6f898de16
# Parent UUID: -
# Received UUID: -
# Creation time: 2021-05-18 08:04:01 -0700
# Subvolume ID: 319
# Generation: 87
# Gen at creation: 84
# Parent ID: 318
# Top level ID: 318
# Flags: -
# Snapshot(s):
#
# and `my_path` looks something like this:
# /delete_recursiveotwxda64/outer
vol_mounted_at_subvol = my_rel_to_vol_root.endswith(my_path)
assert vol_mounted_at_fstree ^ vol_mounted_at_subvol, (
"Unexpected paths calculated from btrfs subvolume show: "
f"{my_rel_to_vol_root}, {my_path}"
)
# In either case we need to calculate what the proper vol_dir is, this
# is used below to list all the subvolumes that the volume contains
# and filter out subvolumes that are "inside" this subvol.
# If the volume has been mounted as an fstree (see the comments above)
# then we want to list subvols below the "root" of the volume, which is
# right above the path returned by `btrfs subvolume show`.
# Example `btrfs subvolume list` (taken from `:test-subvol-utils`):
#
# ]# btrfs subvolume list /data/users/lsalis/fbsource/fbcode/buck-image-out/volume/ # noqa: E501
# ID 260 gen 20 top level 5 path targets/test-layer:Jb__FIQ.HyZR.fkyU/volume # noqa: E501
# ID 266 gen 83 top level 5 path targets/test-subvol-utils-inner__test_layer:Jb__IyU.HzvZ.p73f/volume # noqa: E501
# ID 272 gen 64 top level 5 path targets/build-appliance.c7:Jb__hV4.H42o.pR_O/volume # noqa: E501
# ID 300 gen 66 top level 5 path targets/build_appliance_testingprecursor-without-caches-to-build_appliance_testing:Jb__o1c.H8Bc.ASOl/volume # noqa: E501
# ID 307 gen 70 top level 5 path targets/build_appliance_testing:Jb__rtA.H89Z.j0z3/volume # noqa: E501
# ID 308 gen 72 top level 5 path targets/hello_world_base:Jb__u0g.H9yB.t9oN/volume # noqa: E501
# ID 323 gen 92 top level 5 path tmp/delete_recursiveo7x56sn2/outer
# ID 324 gen 91 top level 323 path tmp/delete_recursiveo7x56sn2/outer/inner1 # noqa: E501
# ID 325 gen 91 top level 324 path tmp/delete_recursiveo7x56sn2/outer/inner1/inner2 # noqa: E501
# ID 326 gen 92 top level 323 path tmp/delete_recursiveo7x56sn2/outer/inner3 # noqa: E501
# ]#
if vol_mounted_at_fstree:
vol_dir = my_path[: -len(my_rel_to_vol_root)]
my_prefix = my_rel_to_vol_root
# If the volume has been mounted at a specific subvol (see the comments
# above). Then we want to list subvols below `/` since that is seen
# to be the "root" of the volume.
# Example `btrfs subvolume list` taken from `:test-subvol-utils-inner`:
#
# ]# btrfs subvolume list /
# ID 260 gen 20 top level 5 path targets/test-layer:Jb__FIQ.HyZR.fkyU/volume # noqa: E501
# ID 266 gen 83 top level 5 path targets/test-subvol-utils-inner__test_layer:Jb__IyU.HzvZ.p73f/volume # noqa: E501
# ID 272 gen 64 top level 5 path targets/build-appliance.c7:Jb__hV4.H42o.pR_O/volume # noqa: E501
# ID 300 gen 66 top level 5 path targets/build_appliance_testingprecursor-without-caches-to-build_appliance_testing:Jb__o1c.H8Bc.ASOl/volume # noqa: E501
# ID 307 gen 70 top level 5 path targets/build_appliance_testing:Jb__rtA.H89Z.j0z3/volume # noqa: E501
# ID 308 gen 72 top level 5 path targets/hello_world_base:Jb__u0g.H9yB.t9oN/volume # noqa: E501
# ID 318 gen 84 top level 5 path tmp/TempSubvolumes_wk81xmx0/test-subvol-utils-inner__test_layer:Jb__IyU.HzvZ.p73f # noqa: E501
# ID 319 gen 87 top level 318 path delete_recursiveotwxda64/outer
# ID 320 gen 86 top level 319 path delete_recursiveotwxda64/outer/inner1 # noqa: E501
# ID 321 gen 86 top level 320 path delete_recursiveotwxda64/outer/inner1/inner2 # noqa: E501
# ID 322 gen 87 top level 319 path delete_recursiveotwxda64/outer/inner3 # noqa: E501
# ]#
# Note: code coverage for this branch is in the
# :test-subvol-utils-inner test, but because of the way
# coverage works I can't properly cover this in the larger
# :test-subvol-utils test.
elif vol_mounted_at_subvol: # pragma: nocover
vol_dir = b"/"
my_prefix = my_path[1:]
# We need a trailing slash to chop off this path prefix below.
my_prefix = my_prefix + (b"" if my_prefix.endswith(b"/") else b"/")
# NB: The `-o` option does not work correctly, don't even bother.
for inner_line in self.run_as_root(
["btrfs", "subvolume", "list", vol_dir], stdout=subprocess.PIPE
).stdout.split(b"\n"):
if not inner_line: # Handle the trailing newline
continue
l = {} # Used to check that the labels are as expected
(
l["ID"],
_,
l["gen"],
_,
l["top"],
l["level"],
_,
l["path"],
p,
) = inner_line.split(b" ", 8)
for k, v in l.items():
assert k.encode() == v, (k, v)
if not p.startswith(my_prefix): # Skip non-inner subvolumes
continue
inner_subvol = p[len(my_prefix) :]
assert inner_subvol == os.path.normpath(inner_subvol), inner_subvol
yield self.path(inner_subvol)
def set_readonly(self, readonly: bool):
self.run_as_root(
[
"btrfs",
"property",
"set",
"-ts",
self.path(),
"ro",
"true" if readonly else "false",
]
)
def set_seed_device(self, output_path: str):
# Clearing the seeding flag on a device may be dangerous. If a
# previously-seeding device is changed, all filesystems that used that
# device will become unmountable. Setting the seeding flag back will
# not fix that.
# Due to this danger and the limited usefulness we don't support
# clearing the seed flag.
self.run_as_root(["btrfstune", "-S", "1", output_path])
def sync(self):
self.run_as_root(["btrfs", "filesystem", "sync", self.path()])
@contextmanager
def _mark_readonly_and_send(
self,
*,
stdout,
no_data: bool = False,
# pyre-fixme[9]: parent has type `Subvol`; used as `None`.
parent: "Subvol" = None,
) -> Iterator[subprocess.Popen]:
self.set_readonly(True)
# Btrfs bug #25329702: in some cases, a `send` without a sync will
# violate read-after-write consistency and send a "past" view of the
# filesystem. Do this on the read-only filesystem to improve
# consistency.
self.sync()
# Btrfs bug #25379871: our 4.6 kernels have an experimental xattr
# caching patch, which is broken, and results in xattrs not showing
# up in the `send` stream unless that metadata is `fsync`ed. For
# some dogscience reason, `getfattr` on a file actually triggers
# such an `fsync`. We do this on a read-only filesystem to improve
# consistency. Coverage: manually tested this on a 4.11 machine:
# platform.uname().release.startswith('4.11.')
if platform.uname().release.startswith("4.6."): # pragma: no cover
self.run_as_root(
[
# Symlinks can point outside of the subvol, don't follow
# them
"getfattr",
"--no-dereference",
"--recursive",
self.path(),
]
)
with self.popen_as_root(
[
"btrfs",
"send",
*(["--no-data"] if no_data else []),
*(["-p", parent.path()] if parent else []),
self.path(),
],
stdout=stdout,
) as proc:
yield proc
def mark_readonly_and_get_sendstream(self, **kwargs) -> bytes:
with self._mark_readonly_and_send(
stdout=subprocess.PIPE, **kwargs
) as proc:
# pyre-fixme[16]: Optional type has no attribute `read`.
return proc.stdout.read()
@contextmanager
def mark_readonly_and_write_sendstream_to_file(
self, outfile: BinaryIO, **kwargs
) -> Iterator[None]:
with self._mark_readonly_and_send(stdout=outfile, **kwargs):
yield
def mark_readonly_and_send_to_new_loopback(
self,
output_path,
loopback_opts: loopback_opts_t,
waste_factor=1.15,
) -> int:
"""
        Overwrites `output_path` with a new btrfs image, and sends this
subvolume to this new volume. The image is populated as a loopback
mount, which will be unmounted before this function returns.
Since btrfs sizing facilities are unreliable, we size the new
filesystem by guesstimating the content size of the filesystem, and
multiplying it by `waste_factor` to ensure that `receive` does not
run out of space. If out-of-space does occur, this function repeats
multiply-send-receive until we succeed, so a low `waste_factor` can
make image builds much slower.
## Notes on setting `waste_factor`
        - This is exposed for unit tests; you should probably not surface
it to users. We should improve the auto-sizing instead.
- Even though sparse files make it fairly cheap to allocate a
much larger loopback than what is required to contain the
subvolume, we want to try to keep the loopback filesystem as
full as possible. The primary rationale is that parts of
our image distribution filesystem do not support sparse files
(to be fixed). Secondarily, btrfs seems to increase the
amount of overhead it permits itself as the base filesystem
becomes larger. I have not carefully measured the loopback
size after accounting for sparseness, but this needs to
be tested before considering much larger waste factors.
- While we resize down to `min-dev-size` after populating the
volume, setting a higher waste factor is still not free. The
reason is that btrfs auto-allocates more metadata blocks for
larger filesystems, but `resize` does not release them. So if
you start with a larger waste factor, your post-shrink
filesystem will be larger, too. This is one of the reasons why
we should just `findmnt -o SIZE` to determine a safe size of the
loopback (the other reason is below).
- The default of 15% is very conservative, with the goal of
never triggering an expensive send+receive combo. This seeks to
optimize developer happiness. In my tests, I have not seen a
filesystem that needed more than 5%. Later, we can add
monitoring and gradually dial this down.
- If your subvolume's `_estimate_content_bytes` is X, and it
fits in a loopback of size Y, it is not guaranteed that you
could have used `waste_factor = Y / X`, because lazy writes make
it possible to resize a populated filesystem to have a size
**below** what you would have needed to populate its content.
- There is an alternative strategy to "multiply by waste_factor &
re-send", which is to implement a `pv`-style function that
sits on a pipe between `send` and `receive`, and does the
following to make sure `receive` never runs out of space:
- `btrfs filesystem sync`, `usage`, and if "min" free space
drops too low, `resize`
- `splice` (via `ctypes`, or write this interposition program
in C) a chunk from `send` to `receive`. Using `splice`
instead of copying through userspace is not **necessarily**
essential, but in order to minimize latency, it's important
that we starve the `receive` end as rarely as possible,
which may require some degree of concurrency between reading
from `send` and writing to `receive`. To clarify: a naive
Python prototype that read & wrote 2MB at a time -- a buffer
that's large enough that we'd frequently starve `receive` or
stall `send` -- experienced a 30% increase in wall time
compared to `send | receive`.
- Monitor usage much more frequently than the free space to
chunk size ratio would indicate, since something may buffer.
Don't use a growth increment that is TOO small.
- Since there are no absolute guarantees that btrfs won't
run out of space on `receive`, there should still be an
outer retry layer, but it ought to never fire.
          - Be aware that the minimum `mkfs.btrfs` size is 108MiB, the
minimum size to which we can grow via `resize` is 175MiB,
while the minimum size to which we can shrink via `resize`
is 256MiB, so the early growth tactics should reflect this.
The advantage of this strategy of interposing on a pipe, if
implemented well, is that we should be able to achieve a smaller
          waste factor without occasionally doubling our wall clock
and IOP costs due to retries. The disadvantage is that if we do
a lot of grow cycles prior to our shrink, the resulting
filesystem may end up being more out-of-tune than if we had
started with a large enough size from the beginning.
"""
if loopback_opts.size_mb:
leftover_bytes, image_size = self._send_to_loopback_if_fits(
output_path,
# pyre-fixme[58]: `*` is not supported for operand types
# `Optional[int]` and `Any`.
loopback_opts.size_mb * MiB,
loopback_opts,
)
# pyre-fixme[58]: `*` is not supported for operand types
# `Optional[int]` and `Any`.
assert image_size == loopback_opts.size_mb * MiB, (
f"{self._path} is {image_size} instead of the requested "
f"{loopback_opts.size_mb * MiB}"
)
attempts = 1
else:
# In my experiments, btrfs needs at least 81 MB of overhead in all
# circumstances, and this initial overhead is not multiplicative.
# To be specific, I tried single-file subvolumes with files of size
# 27, 69, 94, 129, 175, 220MiB.
fs_bytes = self._estimate_content_bytes() + 81 * MiB
# We also need to build an image of at least the MIN_CREATE_BYTES
# size required by btrfs.
fs_bytes = (
fs_bytes if fs_bytes >= MIN_CREATE_BYTES else MIN_CREATE_BYTES
)
attempts = 0
while True:
attempts += 1
fs_bytes *= waste_factor
leftover_bytes, image_size = self._send_to_loopback_if_fits(
output_path, int(fs_bytes), loopback_opts
)
if leftover_bytes == 0:
if not loopback_opts.minimize_size:
break
# The following simple trick saves about 30% of image size.
# The reason is that btrfs auto-allocates more metadata
# blocks for larger filesystems, but `resize` does not
# release them. For many practical use-cases the compression
# ratio is close to 2, hence initial `fs_bytes` estimate is
# too high.
(
leftover_bytes,
new_size,
) = self._send_to_loopback_second_pass(
output_path, image_size, loopback_opts
)
assert leftover_bytes == 0, (
f"Cannot fit {self._path} in {image_size} bytes, "
f"{leftover_bytes} sendstream bytes were left over"
)
assert new_size <= image_size, (
"The second pass of btrfs send-receive produced worse"
f"results that the first: {new_size} vs. {image_size}"
)
break # pragma: no cover
fs_bytes += leftover_bytes
log.warning(
f"{self._path} did not fit in {fs_bytes} bytes, "
f"{leftover_bytes} sendstream bytes were left over"
)
if loopback_opts.seed_device:
self.set_seed_device(output_path)
# Future: It would not be unreasonable to run some sanity checks on
# the resulting filesystem here. Ideas:
# - See if we have an unexpectedly large amount of unused metadata
# space, or other "waste" via `btrfs filesystem usage -b` --
# could ask @clm if this is a frequent issue.
# - Can we check for fragmentation / balance issues?
# - We could (very occasionally & maybe in the background, since
# this is expensive) check that the received subvolume is
# identical to the source subvolume.
return attempts
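    # Illustrative call sketch (not from the original source); `opts` is a
    # loopback_opts_t built elsewhere, and the output path is hypothetical:
    #
    #   attempts = sv.mark_readonly_and_send_to_new_loopback(
    #       "image.btrfs", loopback_opts=opts
    #   )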
@contextmanager
def write_tarball_to_file(
self, outfile: BinaryIO, **kwargs
) -> Iterator[None]:
# gnu tar has a nasty bug where even if you specify `--one-file-system`
# it still tries to perform various operations on other mount points.
# The problem with this is that some filesystem types don't support all
# of the various fs layer calls needed, like `flistxattr` or `savedir`
# in the case of the `gvfs` file system.
        # Because layer mounts or host mounts are currently set up in the root
# namespace, when we try to archive this subvol, we might run into these
# kinds of mounts. So to work around this, we start a new mount
# namespace, unmount everything that is under this subvol, and then
# run the tar command.
with self.popen_as_root(
[
"unshare",
"--mount",
"bash",
"-c",
# Unmount everything that contains the subvol name, that's the
# dir *above* the `volume/` path.
"(mount |"
f" grep {self.path().dirname().basename()} |"
" xargs umount "
")1>&2; " # Make sure any errors in the umounts go to stderr
"tar c "
"--sparse "
"--one-file-system " # Keep this just in case
"--acls "
"--xattrs "
"--to-stdout "
"-C "
f"{self.path()} "
".",
],
stdout=outfile,
):
yield
def _estimate_content_bytes(self):
"""
Returns a (usually) tight lower-bound guess of the filesystem size
necessary to contain this subvolume. The caller is responsible for
appropriately padding this size when creating the destination FS.
## Future: Query the subvolume qgroup to estimate its size
- If quotas are enabled, this should be an `O(1)` operation
instead of the more costly filesystem tree traversal. NB:
qgroup size estimates tend to run a bit (~1%) lower than `du`,
so growth factors may need a tweak. `_estimate_content_bytes()`
should `log.warning` and fall back to `du` if quotas are
disabled in an older `buck-image-out`. It's also an option to
enable quotas and to trigger a `rescan -w`, but requires more
code/testing.
- Using qgroups for builds is a good stress test of the qgroup
subsystem. It would help us gain confidence in that (a) they
don't trigger overt issues (filesystem corruption, dramatic perf
degradation, or crashes), and that (b) they report reasonably
accurate numbers on I/O-stressed build hosts.
- Should run an A/B test to see if the build perf wins of querying
qgroups exceed the perf hit of having quotas enabled.
- Eventually, we'd enable quotas by default for `buck-image-out`
volumes.
- Need to delete the qgroup whenever we delete a subvolume. Two
main cases: `Subvol.delete` and `subvolume_garbage_collector.py`.
          Can check if we are leaking qgroups by building & running
          image tests, and looking to see if that leaves behind 0-sized
          qgroups unaffiliated with subvolumes.
- The basic logic for qgroups looks like this:
$ sudo btrfs subvol show buck-image-out/volume/example |
grep 'Subvolume ID'
Subvolume ID: 1432
$ sudo btrfs qgroup show --raw --sync buck-image-out/volume/ |
grep ^0/1432
0/1432 1381523456 16384
# We want the second column, bytes in referenced extents.
# For the `qgroup show` operation, check for **at least** these
# error signals on stderr -- with exit code 1:
ERROR: can't list qgroups: quotas not enabled
# ... and with exit code 0:
WARNING: qgroup data inconsistent, rescan recommended
WARNING: rescan is running, qgroup data may be incorrect
# Moreover, I would have the build fail on any unknown output.
"""
# Not adding `-x` since buck-built subvolumes should not have other
# filesystems mounted inside them.
start_time = time.time()
du_out = subprocess.check_output(
[
"sudo",
"du",
"--block-size=1",
"--summarize",
# Hack alert: `--one-file-system` works around the fact that we
# may have host mounts inside the image, which we mustn't count.
"--one-file-system",
self._path,
]
).split(b"\t", 1)
assert du_out[1] == self._path + b"\n"
size = int(du_out[0])
log.info(
f"`du` estimated size of {self._path} as {size} in "
f"{time.time() - start_time} seconds"
)
return size
# Mocking this allows tests to exercise the fallback "out of space" path.
_OUT_OF_SPACE_SUFFIX = b": No space left on device\n"
def _send_to_loopback_if_fits(
self,
output_path,
fs_size_bytes: int,
loopback_opts: loopback_opts_t
# pyre-fixme[31]: Expression `(int, int)` is not a valid type.
) -> (int, int):
"""
Creates a loopback of the specified size, and sends the current
subvolume to it. Returns a tuple of two values. The first is the number
of bytes which didn't fit in that space. It is zero if the subvolume
fits. The second value is the image size in the end of operation.
"""
open(output_path, "wb").close()
with pipe() as (r_send, w_send), Unshare(
[Namespace.MOUNT, Namespace.PID]
) as ns, BtrfsLoopbackVolume(
unshare=ns,
image_path=output_path,
size_bytes=fs_size_bytes,
loopback_opts=loopback_opts,
) as loop_vol, self.mark_readonly_and_write_sendstream_to_file(
w_send
):
w_send.close() # This end is now fully owned by `btrfs send`.
with r_send:
recv_ret = loop_vol.receive(r_send)
if recv_ret.returncode != 0:
if recv_ret.stderr.endswith(self._OUT_OF_SPACE_SUFFIX):
log.info("Will retry receive, did not fit")
return (
_drain_pipe_return_byte_count(r_send),
loop_vol.get_size(),
)
log.info("Receive failed: {}")
# It's pretty lame to rely on `btrfs receive` continuing
# to be unlocalized, and emitting that particular error
# message, so we fall back to checking available bytes.
size_ret = subprocess.run(
nsenter_as_user(
ns,
# pyre-fixme[6]: Expected `List[Variable[AnyStr <:
# [str, bytes]]]` for 2nd param but got `str`.
"findmnt",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 3rd param but got `str`.
"--noheadings",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 4th param but got `str`.
"--bytes",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 5th param but got `str`.
"--output",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 6th param but got `str`.
"AVAIL",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 7th param but got `Path`.
loop_vol.dir(),
),
stdout=subprocess.PIPE,
)
# If the `findmnt` fails, don't mask the original error.
# If `btrfs receive` fails with "No space left on device",
# a few KBs (up to 32 in my experiments) of free space may
# remain on destination file-system.
if (
size_ret.returncode == 0
and int(size_ret.stdout) <= 32 * KiB
):
log.info(
"Will retry receive, volume "
f"AVAIL={int(size_ret.stdout)}"
)
return (
_drain_pipe_return_byte_count(r_send),
loop_vol.get_size(),
)
# Covering this is hard, so the test plan is "inspection".
log.error( # pragma: no cover
"Unhandled receive stderr:\n\n"
+ recv_ret.stderr.decode(errors="surrogateescape")
)
else:
# Drain the pipe even though we got a positive
# outcome. This is mostly for testing so that
# we don't have to deal with SIGPIPE errors
# from btrfs send when we mock out the actual
# receive.
_drain_pipe_return_byte_count(r_send)
recv_ret.check_returncode()
subvol_path_src = loop_vol.dir() / self.path().basename()
# Optionally change the subvolume name while packaging
subvol_path_dst = (
(loop_vol.dir() / Path(loopback_opts.subvol_name))
if loopback_opts.subvol_name
else subvol_path_src
)
if subvol_path_src != subvol_path_dst:
self.run_as_root(
ns.nsenter_without_sudo(
"mv",
str(subvol_path_src),
str(subvol_path_dst),
),
)
if loopback_opts.writable_subvolume:
run_stdout_to_err(
nsenter_as_root(
ns,
# pyre-fixme[6]: Expected `List[Variable[AnyStr <:
# [str, bytes]]]` for 2nd param but got `str`.
"btrfs",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 3rd param but got `str`.
"property",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 4th param but got `str`.
"set",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 5th param but got `str`.
"-ts",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 6th param but got `Path`.
subvol_path_dst,
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 7th param but got `str`.
"ro",
# pyre-fixme[6]: Expected `List[Variable[AnyStr <: [str,
# bytes]]]` for 8th param but got `str`.
"false",
)
).check_returncode()
if loopback_opts.default_subvolume:
# Get the subvolume ID by just listing the specific
# subvol and getting the 2nd element.
# The output of this command looks like:
#
# b'ID 256 gen 7 top level 5 path volume\n'
#
subvol_id = self.run_as_root(
ns.nsenter_without_sudo(
"btrfs",
"subvolume",
"list",
str(subvol_path_dst),
),
stdout=subprocess.PIPE,
).stdout.split(b" ")[1]
log.debug(f"subvol_id to set as default: {subvol_id}")
# Actually set the default
self.run_as_root(
ns.nsenter_without_sudo(
"btrfs",
"subvolume",
"set-default",
subvol_id,
str(loop_vol.dir()),
),
stderr=subprocess.STDOUT,
)
return (
0,
loop_vol.minimize_size()
if not loopback_opts.size_mb
else loop_vol.get_size(),
)
def _send_to_loopback_second_pass(
self,
output_path,
initial_size_bytes,
loopback_opts: loopback_opts_t
# pyre-fixme[31]: Expression `(int, int)` is not a valid type.
) -> (int, int):
size_bytes_to_try = 512 * os.stat(output_path).st_blocks
# we cannot make a loopback that is smaller than MIN_CREATE_BYTES
size_bytes_to_try = (
size_bytes_to_try
if size_bytes_to_try >= MIN_CREATE_BYTES
else MIN_CREATE_BYTES
)
attempts = 0
last_effort = False
while True:
attempts += 1
size_bytes_to_try *= 1.1
if size_bytes_to_try >= initial_size_bytes:
# If we got here we could just use the output of the first pass.
# This is a possible future disk vs time optimization.
size_bytes_to_try = initial_size_bytes
last_effort = True
leftover_bytes, new_size = self._send_to_loopback_if_fits(
output_path, int(size_bytes_to_try), loopback_opts
)
if leftover_bytes != 0:
log.warning(
f"{self._path} did not fit in {size_bytes_to_try} bytes, "
f"{leftover_bytes} sendstream bytes were left over, "
f"attempts {attempts}"
)
# The new size might be larger than `size_bytes_to_try` because
# there is a minimum size for a loopback image. That is
            # defined by MIN_CREATE_BYTES. Let's be paranoid
# and check to make sure that we either had to use the
# min size, or we got back the size we were trying.
assert (
int(size_bytes_to_try) < MIN_CREATE_BYTES
or int(size_bytes_to_try) == new_size
)
size_bytes_to_try = new_size
if leftover_bytes == 0 or last_effort:
return (leftover_bytes, new_size)
assert (
attempts <= 10
), f"{attempts} attempts were not enough for {self._path}"
@contextmanager
def receive(self, from_file) -> Iterator[None]:
# At present, we always have an empty wrapper dir to receive into.
# If this changes, we could make a tempdir inside `parent_fd`.
with open_fd(
os.path.dirname(self.path()), os.O_RDONLY | os.O_DIRECTORY
) as parent_fd:
wrapper_dir_contents = os.listdir(parent_fd)
assert wrapper_dir_contents == [], wrapper_dir_contents
try:
with self.popen_as_root(
[
"btrfs",
"receive",
# Future: If we get `pass_fds` support, use
# `/proc/self/fd'
Path("/proc")
/ str(os.getpid())
/ "fd"
/ str(parent_fd),
],
_subvol_exists=False,
stdin=from_file,
):
yield
finally:
received_names = os.listdir(parent_fd)
assert len(received_names) <= 1, received_names
if received_names:
os.rename(
received_names[0],
os.path.basename(self.path()),
src_dir_fd=parent_fd,
dst_dir_fd=parent_fd,
)
# This may be a **partially received** subvol. If these
# semantics turn out to be broken for our purposes, we
# can try to clean up the subvolume on error instead,
# but at present it seems easier to leak it, and let the
# GC code delete it later.
self._mark_created()
def read_path_text(self, relpath: Path) -> str:
return self.path(relpath).read_text()
def read_path_text_as_root(self, relpath: Path) -> str:
# To duplicate the effects of read_path_text(), we need to first check
        # for the existence of the file and maybe raise FileNotFoundError.
# Otherwise we will end up with a CalledProcessError propagating up.
if not self.path(relpath).exists():
raise FileNotFoundError(relpath)
res = self.run_as_root(
["cat", self.path(relpath)],
text=True,
stdout=subprocess.PIPE,
)
res.check_returncode()
return res.stdout
def overwrite_path_as_root(self, relpath: Path, contents: AnyStr):
# Future: support setting user, group, and mode
if isinstance(contents, str):
contents = contents.encode()
self.run_as_root(
["tee", self.path(relpath)],
input=contents,
stdout=subprocess.DEVNULL,
).check_returncode()
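# Illustrative round-trip sketch for the two root-file helpers above (not
# from the original source); the relative path and contents are hypothetical:
#
#   sv.overwrite_path_as_root(Path("etc/motd"), "hello\n")
#   assert sv.read_path_text_as_root(Path("etc/motd")) == "hello\n"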
def with_temp_subvols(method):
"""
A test that needs a TempSubvolumes instance should use this decorator.
This is a cleaner alternative to doing this in setUp:
self.temp_subvols.__enter__()
self.addCleanup(self.temp_subvols.__exit__, None, None, None)
The primary reason this is bad is explained in the TempSubvolumes
docblock. It also fails to pass exception info to the __exit__.
"""
@functools.wraps(method)
def decorated(self, *args, **kwargs):
with TempSubvolumes(Path(sys.argv[0])) as temp_subvols:
return method(self, temp_subvols, *args, **kwargs)
return decorated
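# Illustrative sketch of the decorator in a test case (not from the original
# source); the test class, method, and subvol name are hypothetical:
#
#   class MyTest(unittest.TestCase):
#       @with_temp_subvols
#       def test_something(self, temp_subvols):
#           sv = temp_subvols.create("scratch")
#           ...  # sv is cleaned up when the TempSubvolumes context exits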
# NB: Memoizing this function would be pretty reasonable.
def volume_dir(path_in_repo: Optional[Path] = None) -> Path:
return find_artifacts_dir(path_in_repo) / "volume"
class TempSubvolumes:
"""
Tracks the subvolumes it creates, and destroys them on context exit.
BEST PRACTICES:
- To nest a subvol inside one made by `TempSubvolumes`, create it
via `Subvol` -- bypassing `TempSubvolumes`. It is better to let it
be cleaned up implicitly. If you request explicit cleanup by using
a `TempSubvolumes` method, the inner subvol would have to be deleted
first, which would break if the parent is read-only. See an example
in `test_temp_subvolumes_create` (marked by "NB").
- Avoid using `unittest.TestCase.addCleanup` to `__exit__()` this
context. Instead, use `@with_temp_subvols` on each test method.
`addCleanup` is unreliable -- e.g. clean-up is NOT triggered on
KeyboardInterrupt. Therefore, this **will** leak subvolumes during
development. For manual cleanup:
sudo btrfs sub del buck-image-out/volume/tmp/TempSubvolumes_*/subvol &&
rmdir buck-image-out/volume/tmp/TempSubvolumes_*
Instead of polluting `buck-image-out/volume`, it would be possible to
put these on a separate `BtrfsLoopbackVolume`, to rely on `Unshare` to
guarantee unmounting it, and to rely on `tmpwatch` to delete the stale
loopbacks from `/tmp/`. At present, this doesn't seem worthwhile since
it would require using an `Unshare` object throughout `Subvol`.
"""
def __init__(self, path_in_repo: Optional[Path] = None):
super().__init__()
# The 'tmp' subdirectory simplifies cleanup of leaked temp subvolumes
volume_tmp_dir = volume_dir(path_in_repo) / "tmp"
try:
os.mkdir(volume_tmp_dir)
except FileExistsError:
pass
self._stack = ExitStack()
self._temp_dir_ctx = temp_dir(
dir=volume_tmp_dir.decode(), prefix=self.__class__.__name__ + "_"
)
def __enter__(self):
self._stack.__enter__()
self._temp_dir = self._stack.enter_context(self._temp_dir_ctx)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._temp_dir = None
return self._stack.__exit__(exc_type, exc_val, exc_tb)
def _new_subvol(self, subvol):
return self._stack.enter_context(subvol.delete_on_exit())
@property
def temp_dir(self):
return self._temp_dir
def _prep_rel_path(self, rel_path: AnyStr) -> Path:
"""
Ensures subvolumes live under our temporary directory, which
improves safety, since its permissions ought to be u+rwx to avoid
exposing setuid binaries inside the built subvolumes.
"""
rel_path = (
(self.temp_dir / rel_path)
.realpath()
.relpath(self.temp_dir.realpath())
)
if rel_path.has_leading_dot_dot():
raise AssertionError(
f"{rel_path} must be a subdirectory of {self.temp_dir}"
)
abs_path = self.temp_dir / rel_path
os.makedirs(abs_path.dirname(), exist_ok=True)
return abs_path
def create(self, rel_path: AnyStr) -> Subvol:
return self._new_subvol(Subvol(self._prep_rel_path(rel_path)).create())
def snapshot(self, source: Subvol, dest_rel_path: AnyStr) -> Subvol:
return self._new_subvol(
Subvol(self._prep_rel_path(dest_rel_path)).snapshot(source)
)
def caller_will_create(self, rel_path: AnyStr) -> Subvol:
return self._new_subvol(Subvol(self._prep_rel_path(rel_path)))
def get_subvolumes_dir(
path_in_repo: Optional[Path] = None,
) -> Path:
return volume_dir(path_in_repo) / "targets"
def find_subvolume_on_disk(
layer_output: str,
# pyre-fixme[9]: path_in_repo has type `Path`; used as `None`.
path_in_repo: Path = None,
# pyre-fixme[9]: subvolumes_dir has type `Path`; used as `None`.
subvolumes_dir: Path = None,
) -> SubvolumeOnDisk:
# It's OK for both to be None (uses the current file to find repo), but
# it's not OK to set both.
assert (path_in_repo is None) or (subvolumes_dir is None)
with open(Path(layer_output) / "layer.json") as infile:
return SubvolumeOnDisk.from_json_file(
infile, subvolumes_dir or get_subvolumes_dir(path_in_repo)
)
| 44.427673
| 161
| 0.580927
|
6fdd2848b27c6e4876044a9445fcc1160f388ae1
| 11,486
|
py
|
Python
|
PaddleCV/face_detection/reader.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 2
|
2021-09-13T06:48:23.000Z
|
2021-09-13T06:48:28.000Z
|
PaddleCV/face_detection/reader.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | null | null | null |
PaddleCV/face_detection/reader.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 1
|
2019-08-05T11:32:13.000Z
|
2019-08-05T11:32:13.000Z
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
from PIL import ImageDraw
import numpy as np
import xml.etree.ElementTree
import os
import time
import copy
import random
import cv2
import six
import math
from itertools import islice
import paddle
import image_util
class Settings(object):
def __init__(self,
dataset=None,
data_dir=None,
label_file=None,
resize_h=None,
resize_w=None,
mean_value=[104., 117., 123.],
apply_distort=True,
apply_expand=True,
ap_version='11point',
toy=0):
self.dataset = dataset
self.ap_version = ap_version
self.toy = toy
self.data_dir = data_dir
self.apply_distort = apply_distort
self.apply_expand = apply_expand
self.resize_height = resize_h
self.resize_width = resize_w
self.img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(
'float32')
self.expand_prob = 0.5
self.expand_max_ratio = 4
self.hue_prob = 0.5
self.hue_delta = 18
self.contrast_prob = 0.5
self.contrast_delta = 0.5
self.saturation_prob = 0.5
self.saturation_delta = 0.5
self.brightness_prob = 0.5
        # brightness_delta is the value normalized by 256
self.brightness_delta = 0.125
self.scale = 0.007843 # 1 / 127.5
self.data_anchor_sampling_prob = 0.5
self.min_face_size = 8.0
def to_chw_bgr(image):
"""
    Transpose image from HWC to CHW and from RGB to BGR.
Args:
        image (np.array): an image with HWC and RGB layout.
"""
# HWC to CHW
if len(image.shape) == 3:
image = np.swapaxes(image, 1, 2)
image = np.swapaxes(image, 1, 0)
    # RGB to BGR
image = image[[2, 1, 0], :, :]
return image
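# Quick shape check for to_chw_bgr (illustrative, not from the original
# source): a (H, W, C) array becomes (C, H, W) with the channels reversed.
#
#   x = np.zeros((300, 300, 3), dtype='uint8')
#   assert to_chw_bgr(x).shape == (3, 300, 300)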
def preprocess(img, bbox_labels, mode, settings, image_path):
img_width, img_height = img.size
sampled_labels = bbox_labels
if mode == 'train':
if settings.apply_distort:
img = image_util.distort_image(img, settings)
if settings.apply_expand:
img, bbox_labels, img_width, img_height = image_util.expand_image(
img, bbox_labels, img_width, img_height, settings)
# sampling
batch_sampler = []
prob = np.random.uniform(0., 1.)
if prob > settings.data_anchor_sampling_prob:
scale_array = np.array([16, 32, 64, 128, 256, 512])
batch_sampler.append(
image_util.sampler(1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2,
0.0, True))
sampled_bbox = image_util.generate_batch_random_samples(
batch_sampler, bbox_labels, img_width, img_height, scale_array,
settings.resize_width, settings.resize_height)
img = np.array(img)
if len(sampled_bbox) > 0:
idx = int(np.random.uniform(0, len(sampled_bbox)))
img, sampled_labels = image_util.crop_image_sampling(
img, bbox_labels, sampled_bbox[idx], img_width, img_height,
settings.resize_width, settings.resize_height,
settings.min_face_size)
img = img.astype('uint8')
img = Image.fromarray(img)
else:
# hard-code here
batch_sampler.append(
image_util.sampler(1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(
image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(
image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(
image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(
image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
sampled_bbox = image_util.generate_batch_samples(
batch_sampler, bbox_labels, img_width, img_height)
img = np.array(img)
if len(sampled_bbox) > 0:
idx = int(np.random.uniform(0, len(sampled_bbox)))
img, sampled_labels = image_util.crop_image(
img, bbox_labels, sampled_bbox[idx], img_width, img_height,
settings.resize_width, settings.resize_height,
settings.min_face_size)
img = Image.fromarray(img)
interp_mode = [
Image.BILINEAR, Image.HAMMING, Image.NEAREST, Image.BICUBIC,
Image.LANCZOS
]
interp_indx = np.random.randint(0, 5)
img = img.resize(
(settings.resize_width, settings.resize_height),
resample=interp_mode[interp_indx])
img = np.array(img)
if mode == 'train':
mirror = int(np.random.uniform(0, 2))
if mirror == 1:
img = img[:, ::-1, :]
for i in six.moves.xrange(len(sampled_labels)):
tmp = sampled_labels[i][1]
sampled_labels[i][1] = 1 - sampled_labels[i][3]
sampled_labels[i][3] = 1 - tmp
img = to_chw_bgr(img)
img = img.astype('float32')
img -= settings.img_mean
img = img * settings.scale
return img, sampled_labels
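# Worked example for the mirror step above (illustrative numbers only): with
# the label layout [label, xmin, ymin, xmax, ymax] in normalized coordinates,
# a box with xmin=0.2 and xmax=0.5 on a horizontally flipped image becomes
# xmin = 1 - 0.5 = 0.5 and xmax = 1 - 0.2 = 0.8, while ymin and ymax are
# unchanged.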
def load_file_list(input_txt):
with open(input_txt, 'r') as f_dir:
lines_input_txt = f_dir.readlines()
file_dict = {}
num_class = 0
for i in range(len(lines_input_txt)):
line_txt = lines_input_txt[i].strip('\n\t\r')
if '--' in line_txt:
if i != 0:
num_class += 1
file_dict[num_class] = []
file_dict[num_class].append(line_txt)
if '--' not in line_txt:
if len(line_txt) > 6:
split_str = line_txt.split(' ')
x1_min = float(split_str[0])
y1_min = float(split_str[1])
x2_max = float(split_str[2])
y2_max = float(split_str[3])
line_txt = str(x1_min) + ' ' + str(y1_min) + ' ' + str(
x2_max) + ' ' + str(y2_max)
file_dict[num_class].append(line_txt)
else:
file_dict[num_class].append(line_txt)
return list(file_dict.values())
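# Sketch of the input_txt layout this parser expects (the path below is a
# placeholder in the WIDER-FACE annotation style, not project data):
#   0--SomeEvent/image_0001.jpg   <- contains '--', so it starts a new group
#   2                             <- short line (6 chars or fewer), kept verbatim
#   10 20 30 40 1 0 0 0 0 0       <- annotation row; only the first four
#                                    numbers are kept for each box
# Each group becomes one list entry: [image_line, count_line, box_line, ...]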
def expand_bboxes(bboxes,
expand_left=2.,
expand_up=2.,
expand_right=2.,
expand_down=2.):
"""
    Expand bboxes, expanding them 2 times by default.
"""
expand_boxes = []
for bbox in bboxes:
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[2]
ymax = bbox[3]
w = xmax - xmin
h = ymax - ymin
ex_xmin = max(xmin - w / expand_left, 0.)
ex_ymin = max(ymin - h / expand_up, 0.)
ex_xmax = min(xmax + w / expand_right, 1.)
ex_ymax = min(ymax + h / expand_down, 1.)
expand_boxes.append([ex_xmin, ex_ymin, ex_xmax, ex_ymax])
return expand_boxes
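# A hedged check of expand_bboxes with an arbitrary normalized box (not
# project data). With the default factor of 2, each side grows by half of the
# box width/height and is clipped to [0, 1].
if __name__ == '__main__':  # runs only when this file is executed directly
    assert expand_bboxes([[0.25, 0.5, 0.75, 0.75]]) == [[0.0, 0.375, 1.0, 0.875]]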
def train_generator(settings, file_list, batch_size, shuffle=True):
def reader():
if shuffle:
np.random.shuffle(file_list)
batch_out = []
for item in file_list:
image_name = item[0]
image_path = os.path.join(settings.data_dir, image_name)
im = Image.open(image_path)
if im.mode == 'L':
im = im.convert('RGB')
im_width, im_height = im.size
# layout: label | xmin | ymin | xmax | ymax
bbox_labels = []
for index_box in range(len(item)):
if index_box >= 2:
bbox_sample = []
temp_info_box = item[index_box].split(' ')
xmin = float(temp_info_box[0])
ymin = float(temp_info_box[1])
w = float(temp_info_box[2])
h = float(temp_info_box[3])
# Filter out wrong labels
if w < 0 or h < 0:
continue
xmax = xmin + w
ymax = ymin + h
bbox_sample.append(1)
bbox_sample.append(float(xmin) / im_width)
bbox_sample.append(float(ymin) / im_height)
bbox_sample.append(float(xmax) / im_width)
bbox_sample.append(float(ymax) / im_height)
bbox_labels.append(bbox_sample)
im, sample_labels = preprocess(im, bbox_labels, "train", settings,
image_path)
sample_labels = np.array(sample_labels)
if len(sample_labels) == 0: continue
im = im.astype('float32')
face_box = sample_labels[:, 1:5]
head_box = expand_bboxes(face_box)
label = [1] * len(face_box)
batch_out.append((im, face_box, head_box, label))
if len(batch_out) == batch_size:
yield batch_out
batch_out = []
return reader
def train(settings, file_list, batch_size, shuffle=True, num_workers=8):
file_lists = load_file_list(file_list)
n = int(math.ceil(len(file_lists) // num_workers))
split_lists = [file_lists[i:i + n] for i in range(0, len(file_lists), n)]
readers = []
for iterm in split_lists:
readers.append(train_generator(settings, iterm, batch_size, shuffle))
return paddle.reader.multiprocess_reader(readers, False)
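# Note: train() shards the annotation groups into roughly num_workers chunks,
# builds one generator per chunk, and hands them to
# paddle.reader.multiprocess_reader so batches are produced by parallel
# worker processes.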
def test(settings, file_list):
file_lists = load_file_list(file_list)
def reader():
for image in file_lists:
image_name = image[0]
image_path = os.path.join(settings.data_dir, image_name)
im = Image.open(image_path)
if im.mode == 'L':
im = im.convert('RGB')
yield im, image_path
return reader
def infer(settings, image_path):
def batch_reader():
img = Image.open(image_path)
if img.mode == 'L':
img = img.convert('RGB')
im_width, im_height = img.size
if settings.resize_width and settings.resize_height:
img = img.resize((settings.resize_width, settings.resize_height),
Image.ANTIALIAS)
img = np.array(img)
img = to_chw_bgr(img)
img = img.astype('float32')
img -= settings.img_mean
img = img * settings.scale
return np.array([img])
return batch_reader
| 35.341538
| 79
| 0.555024
|
47c0202c47c940e1944ff5f13a0699805b520031
| 12,474
|
py
|
Python
|
nose2/tests/unit/test_junitxml.py
|
deeplow/nose2
|
eb0394160e24afe760e984d93dbece8351dbae7a
|
[
"BSD-2-Clause"
] | 637
|
2015-01-12T02:02:53.000Z
|
2022-03-30T19:47:48.000Z
|
nose2/tests/unit/test_junitxml.py
|
deeplow/nose2
|
eb0394160e24afe760e984d93dbece8351dbae7a
|
[
"BSD-2-Clause"
] | 276
|
2015-01-02T19:14:06.000Z
|
2022-03-18T04:03:08.000Z
|
nose2/tests/unit/test_junitxml.py
|
deeplow/nose2
|
eb0394160e24afe760e984d93dbece8351dbae7a
|
[
"BSD-2-Clause"
] | 127
|
2015-01-08T12:02:10.000Z
|
2022-01-10T20:52:29.000Z
|
import logging
import os
import sys
import unittest
from xml.etree import ElementTree as ET
import six
from nose2 import events, loader, result, session, tools
from nose2.plugins import junitxml, logcapture
from nose2.plugins.loader import functions, generators, parameters, testcases
from nose2.tests._common import TestCase
class TestJunitXmlPlugin(TestCase):
_RUN_IN_TEMP = True
BAD_FOR_XML_U = six.u("A\x07 B\x0B C\x10 D\uD900 " "E\uFFFE F\x80 G\x90 H\uFDDD")
# UTF-8 string with double null (invalid)
BAD_FOR_XML_B = six.b(
"A\x07 B\x0b C\x10 D\xed\xa4\x80 "
"E\xef\xbf\xbe F\xc2\x80 G\xc2\x90 H\xef\xb7\x9d "
"\x00\x00"
)
# "byte" strings in PY2 and unicode in py3 works as expected will
# will translate surrogates into UTF-16 characters so BAD_FOR_XML_U
# should have 8 letters follows by 0xFFFD, but only 4 when keeping
# the discouraged/restricted ranges. Respectively:
# "A\uFFFD B\uFFFD C\uFFFD D\uFFFD E\uFFFD F\uFFFD G\uFFFD H\uFFFD"
# "A\uFFFD B\uFFFD C\uFFFD D\uFFFD E\uFFFD F\x80 G\x90 H\uFDDD"
#
    # In Python 2, invalid ASCII characters seem to get escaped as part of
    # traceback formatting, so the full and partial replacements are:
# "A\uFFFD B\uFFFD C\uFFFD D\\\\ud900 E\\\\ufffe F\\\\x80 G\\\\x90 H\\\\ufddd"
# "A\uFFFD B\uFFFD C\uFFFD D\\\\ud900 E\\\\ufffe F\\\\x80 G\\\\x90 H\\\\ufddd"
#
# Byte strings in py3 as errors are replaced by their representation string
# So these will be safe and not have any replacements
# "b'A\\x07 B\\x0b C\\x10 D\\xed\\xa4\\x80 E\\xef\\xbf\\xbe F\\xc2\\x80
# G\\xc2\\x90 H\\xef\\xb7\\x9d \\x00\\x00"
if sys.maxunicode <= 0xFFFF:
EXPECTED_RE = six.u("^[\x09\x0A\x0D\x20\x21-\uD7FF\uE000-\uFFFD]*$")
EXPECTED_RE_SAFE = six.u(
"^[\x09\x0A\x0D\x20\x21-\x7E\x85" "\xA0-\uD7FF\uE000-\uFDCF\uFDF0-\uFFFD]*$"
)
else:
EXPECTED_RE = six.u(
"^[\x09\x0A\x0D\x20\x21-\uD7FF\uE000-\uFFFD" "\u10000-\u10FFFF]*$"
)
EXPECTED_RE_SAFE = six.u(
"^[\x09\x0A\x0D\x20\x21-\x7E\x85"
"\xA0-\uD7FF\uE000-\uFDCF\uFDF0-\uFFFD"
"\u10000-\u1FFFD\u20000-\u2FFFD"
"\u30000-\u3FFFD\u40000-\u4FFFD"
"\u50000-\u5FFFD\u60000-\u6FFFD"
"\u70000-\u7FFFD\u80000-\u8FFFD"
"\u90000-\u8FFFD\uA0000-\uAFFFD"
"\uB0000-\uBFFFD\uC0000-\uCFFFD"
"\uD0000-\uDFFFD\uE0000-\uEFFFD"
"\uF0000-\uFFFFD\u100000-\u10FFFD]*$"
)
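    # Illustration (hedged): the character classes above are used for
    # anchored matching via assertRegex below. A bell character ("\x07")
    # falls outside both classes, while a tab ("\x09") and plain ASCII
    # letters are accepted by EXPECTED_RE and EXPECTED_RE_SAFE alike.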
def setUp(self):
super(TestJunitXmlPlugin, self).setUp()
self.session = session.Session()
self.loader = loader.PluggableTestLoader(self.session)
self.result = result.PluggableTestResult(self.session)
self.plugin = junitxml.JUnitXmlReporter(session=self.session)
self.plugin.register()
# Python 2.7 needs this
# assertRegexpMatches() was renamed to assertRegex() in 3.2
if not hasattr(self, "assertRegex"):
self.assertRegex = self.assertRegexpMatches
class Test(unittest.TestCase):
def test(self):
pass
def test_chdir(self):
TEMP_SUBFOLDER = "test_chdir"
os.mkdir(TEMP_SUBFOLDER)
os.chdir(TEMP_SUBFOLDER)
def test_fail(self):
assert False
def test_err(self):
1 / 0
def test_skip(self):
self.skipTest("skip")
def test_skip_no_reason(self):
self.skipTest("")
def test_bad_xml(self):
raise RuntimeError(TestJunitXmlPlugin.BAD_FOR_XML_U)
def test_bad_xml_b(self):
raise RuntimeError(TestJunitXmlPlugin.BAD_FOR_XML_B)
def test_gen(self):
def check(a, b):
self.assertEqual(a, b)
yield check, 1, 1
yield check, 1, 2
@tools.params(1, 2, 3)
def test_params(self, p):
self.assertEqual(p, 2)
def test_with_log(self):
logging.info("log message")
self.case = Test
def test_success_added_to_xml(self):
test = self.case("test")
test(self.result)
self.assertEqual(self.plugin.numtests, 1)
self.assertEqual(len(self.plugin.tree.findall("testcase")), 1)
def test_failure_includes_traceback(self):
test = self.case("test_fail")
test(self.result)
case = self.plugin.tree.find("testcase")
failure = case.find("failure")
assert failure is not None
assert "Traceback" in failure.text
def test_error_bad_xml(self):
self.plugin.keep_restricted = False
test = self.case("test_bad_xml")
test(self.result)
case = self.plugin.tree.find("testcase")
error = case.find("error")
self.assertRegex(error.text, self.EXPECTED_RE_SAFE)
def test_error_bad_xml_keep(self):
self.plugin.keep_restricted = True
test = self.case("test_bad_xml")
test(self.result)
case = self.plugin.tree.find("testcase")
error = case.find("error")
self.assertRegex(error.text, self.EXPECTED_RE)
def test_error_bad_xml_b(self):
self.plugin.keep_restricted = False
test = self.case("test_bad_xml_b")
test(self.result)
case = self.plugin.tree.find("testcase")
error = case.find("error")
assert error is not None
self.assertRegex(error.text, self.EXPECTED_RE_SAFE)
def test_error_bad_xml_b_keep(self):
self.plugin.keep_restricted = True
test = self.case("test_bad_xml_b")
test(self.result)
case = self.plugin.tree.find("testcase")
error = case.find("error")
assert error is not None
self.assertRegex(error.text, self.EXPECTED_RE)
def test_error_includes_traceback(self):
test = self.case("test_err")
test(self.result)
case = self.plugin.tree.find("testcase")
error = case.find("error")
assert error is not None
assert "Traceback" in error.text
def test_skip_includes_skipped(self):
test = self.case("test_skip")
test(self.result)
case = self.plugin.tree.find("testcase")
skip = case.find("skipped")
assert skip is not None
self.assertEqual(skip.get("message"), "test skipped: skip")
self.assertEqual(skip.text, "skip")
def test_skip_includes_skipped_no_reason(self):
test = self.case("test_skip_no_reason")
test(self.result)
case = self.plugin.tree.find("testcase")
skip = case.find("skipped")
assert skip is not None
self.assertIsNone(skip.get("message"))
self.assertIsNone(skip.text)
def test_generator_test_name_correct(self):
gen = generators.Generators(session=self.session)
gen.register()
event = events.LoadFromTestCaseEvent(self.loader, self.case)
self.session.hooks.loadTestsFromTestCase(event)
cases = event.extraTests
for case in cases:
case(self.result)
xml = self.plugin.tree.findall("testcase")
self.assertEqual(len(xml), 2)
self.assertEqual(xml[0].get("name"), "test_gen:1")
self.assertEqual(xml[1].get("name"), "test_gen:2")
def test_generator_test_full_name_correct(self):
gen = generators.Generators(session=self.session)
gen.register()
self.plugin.test_fullname = True
event = events.LoadFromTestCaseEvent(self.loader, self.case)
self.session.hooks.loadTestsFromTestCase(event)
cases = event.extraTests
for case in cases:
case(self.result)
xml = self.plugin.tree.findall("testcase")
self.assertEqual(len(xml), 2)
self.assertEqual(xml[0].get("name"), "test_gen:1 (1, 1)")
self.assertEqual(xml[1].get("name"), "test_gen:2 (1, 2)")
def test_function_classname_is_module(self):
fun = functions.Functions(session=self.session)
fun.register()
def test_func():
pass
cases = fun._createTests(test_func)
self.assertEqual(len(cases), 1)
cases[0](self.result)
xml = self.plugin.tree.findall("testcase")
self.assertEqual(len(xml), 1)
self.assertTrue(xml[0].get("classname").endswith("test_junitxml"))
def test_params_test_name_correct(self):
# param test loading is a bit more complex than generator
# loading. XXX -- can these be reconciled so they both
# support exclude and also both support loadTestsFromTestCase?
plug1 = parameters.Parameters(session=self.session)
plug1.register()
plug2 = testcases.TestCaseLoader(session=self.session)
plug2.register()
# need module to fire top-level event
class Mod(object):
pass
m = Mod()
m.Test = self.case
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
for case in event.extraTests:
case(self.result)
xml = self.plugin.tree.findall("testcase")
self.assertEqual(len(xml), 13)
params = [x for x in xml if x.get("name").startswith("test_params")]
self.assertEqual(len(params), 3)
self.assertEqual(params[0].get("name"), "test_params:1")
self.assertEqual(params[1].get("name"), "test_params:2")
self.assertEqual(params[2].get("name"), "test_params:3")
def test_params_test_full_name_correct(self):
plug1 = parameters.Parameters(session=self.session)
plug1.register()
plug2 = testcases.TestCaseLoader(session=self.session)
plug2.register()
# need module to fire top-level event
class Mod(object):
pass
m = Mod()
m.Test = self.case
event = events.LoadFromModuleEvent(self.loader, m)
self.plugin.test_fullname = True
self.session.hooks.loadTestsFromModule(event)
for case in event.extraTests:
case(self.result)
xml = self.plugin.tree.findall("testcase")
self.assertEqual(len(xml), 13)
params = [x for x in xml if x.get("name").startswith("test_params")]
self.assertEqual(len(params), 3)
self.assertEqual(params[0].get("name"), "test_params:1 (1)")
self.assertEqual(params[1].get("name"), "test_params:2 (2)")
self.assertEqual(params[2].get("name"), "test_params:3 (3)")
def test_writes_xml_file_at_end(self):
test = self.case("test")
test(self.result)
event = events.StopTestRunEvent(None, self.result, 1, 1)
self.plugin.stopTestRun(event)
with open(self.plugin.path, "r") as fh:
tree = ET.parse(fh).getroot()
self.assertEqual(len(tree.findall("testcase")), 1)
case = tree.find("testcase")
assert "time" in case.attrib
assert "classname" in case.attrib
self.assertEqual(case.get("name"), "test")
self.assertEqual(tree.get("errors"), "0")
self.assertEqual(tree.get("failures"), "0")
self.assertEqual(tree.get("skipped"), "0")
self.assertEqual(tree.get("tests"), "1")
assert "time" in tree.attrib
def test_xml_file_path_is_not_affected_by_chdir_in_test(self):
        initial_dir = os.getcwd()
test = self.case("test_chdir")
test(self.result)
self.assertEqual(
            initial_dir, os.path.dirname(os.path.realpath(self.plugin.path))
)
def test_xml_contains_empty_system_out_without_logcapture(self):
test = self.case("test_with_log")
test(self.result)
case = self.plugin.tree.find("testcase")
system_out = case.find("system-out")
assert system_out is not None
assert not system_out.text
def test_xml_contains_log_message_in_system_out_with_logcapture(self):
self.logcapture_plugin = logcapture.LogCapture(session=self.session)
self.logcapture_plugin.register()
test = self.case("test_with_log")
test(self.result)
case = self.plugin.tree.find("testcase")
system_out = case.find("system-out")
assert system_out is not None
assert "log message" in system_out.text
assert "INFO" in system_out.text
| 37.347305
| 88
| 0.619929
|
7e8a0f4711924eac5ecc2c462974277910cb215c
| 24,225
|
py
|
Python
|
ironic/tests/unit/api/controllers/v1/test_driver.py
|
inmotionhosting/ironic
|
1c7b5f82592e23ab66dddca56e0b059d3cb0710b
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/api/controllers/v1/test_driver.py
|
inmotionhosting/ironic
|
1c7b5f82592e23ab66dddca56e0b059d3cb0710b
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/api/controllers/v1/test_driver.py
|
inmotionhosting/ironic
|
1c7b5f82592e23ab66dddca56e0b059d3cb0710b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
import json
from unittest import mock
from oslo_config import cfg
from testtools import matchers
from ironic.api.controllers import base as api_base
from ironic.api.controllers.v1 import driver
from ironic.api.controllers.v1 import versions as api_versions
from ironic.common import exception
from ironic.conductor import rpcapi
from ironic.drivers import base as driver_base
from ironic.tests.unit.api import base
class TestListDrivers(base.BaseApiTest):
hw1 = 'fake-hardware-type'
hw2 = 'fake-hardware-type-2'
h1 = 'fake-host1'
h2 = 'fake-host2'
def register_fake_conductors(self):
c1 = self.dbapi.register_conductor({
'hostname': self.h1, 'drivers': [],
})
c2 = self.dbapi.register_conductor({
'hostname': self.h2, 'drivers': [],
})
for c in (c1, c2):
self.dbapi.register_conductor_hardware_interfaces(
c.id, self.hw1, 'deploy', ['iscsi', 'direct'], 'direct')
self.dbapi.register_conductor_hardware_interfaces(
c1.id, self.hw2, 'deploy', ['iscsi', 'direct'], 'direct')
def _test_drivers(self, use_dynamic, detail=False, latest_if=False):
self.register_fake_conductors()
headers = {}
expected = [
{'name': self.hw1, 'hosts': [self.h1, self.h2], 'type': 'dynamic'},
{'name': self.hw2, 'hosts': [self.h1], 'type': 'dynamic'},
]
expected = sorted(expected, key=lambda d: d['name'])
if use_dynamic:
if latest_if:
headers[api_base.Version.string] = \
api_versions.max_version_string()
else:
headers[api_base.Version.string] = '1.30'
path = '/drivers'
if detail:
path += '?detail=True'
data = self.get_json(path, headers=headers)
self.assertEqual(len(expected), len(data['drivers']))
drivers = sorted(data['drivers'], key=lambda d: d['name'])
for i in range(len(expected)):
d = drivers[i]
e = expected[i]
self.assertEqual(e['name'], d['name'])
self.assertEqual(sorted(e['hosts']), sorted(d['hosts']))
self.validate_link(d['links'][0]['href'])
self.validate_link(d['links'][1]['href'])
if use_dynamic:
self.assertEqual(e['type'], d['type'])
# NOTE(jroll) we don't test detail=True with use_dynamic=False
# as this case can't actually happen.
if detail:
self.assertIn('default_deploy_interface', d)
if latest_if:
self.assertIn('default_rescue_interface', d)
self.assertIn('enabled_rescue_interfaces', d)
self.assertIn('default_storage_interface', d)
self.assertIn('enabled_storage_interfaces', d)
else:
self.assertNotIn('default_rescue_interface', d)
self.assertNotIn('enabled_rescue_interfaces', d)
self.assertNotIn('default_storage_interface', d)
self.assertNotIn('enabled_storage_interfaces', d)
else:
# ensure we don't spill these fields into driver listing
# one should be enough
self.assertNotIn('default_deploy_interface', d)
def test_drivers(self):
self._test_drivers(False)
def test_drivers_with_dynamic(self):
self._test_drivers(True)
def _test_drivers_with_dynamic_detailed(self, latest_if=False):
with mock.patch.object(self.dbapi, 'list_hardware_type_interfaces',
autospec=True) as mock_hw:
mock_hw.return_value = [
{
'hardware_type': self.hw1,
'interface_type': 'deploy',
'interface_name': 'iscsi',
'default': False,
},
{
'hardware_type': self.hw1,
'interface_type': 'deploy',
'interface_name': 'direct',
'default': True,
},
]
self._test_drivers(True, detail=True, latest_if=latest_if)
def test_drivers_with_dynamic_detailed(self):
self._test_drivers_with_dynamic_detailed()
def test_drivers_with_dynamic_detailed_storage_interface(self):
self._test_drivers_with_dynamic_detailed(latest_if=True)
def test_drivers_type_filter_classic(self):
self.register_fake_conductors()
headers = {api_base.Version.string: '1.30'}
data = self.get_json('/drivers?type=classic', headers=headers)
self.assertEqual([], data['drivers'])
def test_drivers_type_filter_dynamic(self):
self.register_fake_conductors()
headers = {api_base.Version.string: '1.30'}
data = self.get_json('/drivers?type=dynamic', headers=headers)
self.assertNotEqual([], data['drivers'])
for d in data['drivers']:
# just check it's the right type, other tests handle the rest
self.assertEqual('dynamic', d['type'])
def test_drivers_type_filter_bad_version(self):
headers = {api_base.Version.string: '1.29'}
data = self.get_json('/drivers?type=classic',
headers=headers,
expect_errors=True)
self.assertEqual(http_client.NOT_ACCEPTABLE, data.status_code)
def test_drivers_type_filter_bad_value(self):
headers = {api_base.Version.string: '1.30'}
data = self.get_json('/drivers?type=working',
headers=headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, data.status_code)
def test_drivers_detail_bad_version(self):
headers = {api_base.Version.string: '1.29'}
data = self.get_json('/drivers?detail=True',
headers=headers,
expect_errors=True)
self.assertEqual(http_client.NOT_ACCEPTABLE, data.status_code)
def test_drivers_detail_bad_version_false(self):
headers = {api_base.Version.string: '1.29'}
data = self.get_json('/drivers?detail=False',
headers=headers,
expect_errors=True)
self.assertEqual(http_client.NOT_ACCEPTABLE, data.status_code)
def test_drivers_no_active_conductor(self):
data = self.get_json('/drivers')
self.assertThat(data['drivers'], matchers.HasLength(0))
self.assertEqual([], data['drivers'])
@mock.patch.object(rpcapi.ConductorAPI, 'get_driver_properties')
def _test_drivers_get_one_ok(self, mock_driver_properties,
latest_if=False):
# get_driver_properties mock is required by validate_link()
self.register_fake_conductors()
driver = self.hw1
driver_type = 'dynamic'
hosts = [self.h1, self.h2]
headers = {}
if latest_if:
headers[api_base.Version.string] = \
api_versions.max_version_string()
else:
headers[api_base.Version.string] = '1.30'
data = self.get_json('/drivers/%s' % driver,
headers=headers)
self.assertEqual(driver, data['name'])
self.assertEqual(sorted(hosts), sorted(data['hosts']))
self.assertIn('properties', data)
self.assertEqual(driver_type, data['type'])
for iface in driver_base.ALL_INTERFACES:
if iface != 'bios':
if latest_if or iface not in ['rescue', 'storage']:
self.assertIn('default_%s_interface' % iface, data)
self.assertIn('enabled_%s_interfaces' % iface, data)
self.assertIsNotNone(data['default_deploy_interface'])
self.assertIsNotNone(data['enabled_deploy_interfaces'])
self.validate_link(data['links'][0]['href'])
self.validate_link(data['links'][1]['href'])
self.validate_link(data['properties'][0]['href'])
self.validate_link(data['properties'][1]['href'])
def _test_drivers_get_one_ok_dynamic(self, latest_if=False):
with mock.patch.object(self.dbapi, 'list_hardware_type_interfaces',
autospec=True) as mock_hw:
mock_hw.return_value = [
{
'hardware_type': self.hw1,
'interface_type': 'deploy',
'interface_name': 'iscsi',
'default': False,
},
{
'hardware_type': self.hw1,
'interface_type': 'deploy',
'interface_name': 'direct',
'default': True,
},
]
self._test_drivers_get_one_ok(latest_if=latest_if)
mock_hw.assert_called_once_with([self.hw1])
def test_drivers_get_one_ok_dynamic_base_interfaces(self):
self._test_drivers_get_one_ok_dynamic()
def test_drivers_get_one_ok_dynamic_latest_interfaces(self):
self._test_drivers_get_one_ok_dynamic(latest_if=True)
def test_driver_properties_hidden_in_lower_version(self):
self.register_fake_conductors()
data = self.get_json('/drivers/%s' % self.hw1,
headers={api_base.Version.string: '1.8'})
self.assertNotIn('properties', data)
def test_driver_type_hidden_in_lower_version(self):
self.register_fake_conductors()
data = self.get_json('/drivers/%s' % self.hw1,
headers={api_base.Version.string: '1.14'})
self.assertNotIn('type', data)
def test_drivers_get_one_not_found(self):
response = self.get_json('/drivers/nope', expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def _test_links(self, public_url=None):
cfg.CONF.set_override('public_endpoint', public_url, 'api')
self.register_fake_conductors()
data = self.get_json('/drivers/%s' % self.hw1)
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(self.hw1, data['links'][0]['href'])
for link in data['links']:
bookmark = link['rel'] == 'bookmark'
self.assertTrue(self.validate_link(link['href'],
bookmark=bookmark))
if public_url is not None:
expected = [{'href': '%s/v1/drivers/%s' % (public_url, self.hw1),
'rel': 'self'},
{'href': '%s/drivers/%s' % (public_url, self.hw1),
'rel': 'bookmark'}]
for i in expected:
self.assertIn(i, data['links'])
def test_links(self):
self._test_links()
def test_links_public_url(self):
self._test_links(public_url='http://foo')
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_sync(self, mocked_driver_vendor_passthru):
self.register_fake_conductors()
mocked_driver_vendor_passthru.return_value = {
'return': {'return_key': 'return_value'},
'async': False,
'attach': False}
response = self.post_json(
'/drivers/%s/vendor_passthru/do_test' % self.hw1,
{'test_key': 'test_value'})
self.assertEqual(http_client.OK, response.status_int)
self.assertEqual(mocked_driver_vendor_passthru.return_value['return'],
response.json)
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_async(self, mocked_driver_vendor_passthru):
self.register_fake_conductors()
mocked_driver_vendor_passthru.return_value = {'return': None,
'async': True,
'attach': False}
response = self.post_json(
'/drivers/%s/vendor_passthru/do_test' % self.hw1,
{'test_key': 'test_value'})
self.assertEqual(http_client.ACCEPTED, response.status_int)
self.assertIsNone(mocked_driver_vendor_passthru.return_value['return'])
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_put(self, mocked_driver_vendor_passthru):
self.register_fake_conductors()
return_value = {'return': None, 'async': True, 'attach': False}
mocked_driver_vendor_passthru.return_value = return_value
response = self.put_json(
'/drivers/%s/vendor_passthru/do_test' % self.hw1,
{'test_key': 'test_value'})
self.assertEqual(http_client.ACCEPTED, response.status_int)
self.assertEqual(b'', response.body)
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_get(self, mocked_driver_vendor_passthru):
self.register_fake_conductors()
return_value = {'return': 'foo', 'async': False, 'attach': False}
mocked_driver_vendor_passthru.return_value = return_value
response = self.get_json(
'/drivers/%s/vendor_passthru/do_test' % self.hw1)
self.assertEqual(return_value['return'], response)
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_delete(self, mock_driver_vendor_passthru):
self.register_fake_conductors()
return_value = {'return': None, 'async': True, 'attach': False}
mock_driver_vendor_passthru.return_value = return_value
response = self.delete(
'/drivers/%s/vendor_passthru/do_test' % self.hw1)
self.assertEqual(http_client.ACCEPTED, response.status_int)
self.assertEqual(b'', response.body)
def test_driver_vendor_passthru_driver_not_found(self):
# tests when given driver is not found
# e.g. get_topic_for_driver fails to find the driver
response = self.post_json(
'/drivers/%s/vendor_passthru/do_test' % self.hw1,
{'test_key': 'test_value'},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_driver_vendor_passthru_method_not_found(self):
response = self.post_json(
'/drivers/%s/vendor_passthru' % self.hw1,
{'test_key': 'test_value'},
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
error = json.loads(response.json['error_message'])
self.assertEqual('Missing mandatory parameter: method',
error['faultstring'])
@mock.patch.object(rpcapi.ConductorAPI,
'get_driver_vendor_passthru_methods')
def test_driver_vendor_passthru_methods(self, get_methods_mock):
self.register_fake_conductors()
return_value = {'foo': 'bar'}
get_methods_mock.return_value = return_value
path = '/drivers/%s/vendor_passthru/methods' % self.hw1
data = self.get_json(path)
self.assertEqual(return_value, data)
get_methods_mock.assert_called_once_with(mock.ANY, self.hw1,
topic=mock.ANY)
# Now let's test the cache: Reset the mock
get_methods_mock.reset_mock()
# Call it again
data = self.get_json(path)
self.assertEqual(return_value, data)
# Assert RPC method wasn't called this time
self.assertFalse(get_methods_mock.called)
@mock.patch.object(rpcapi.ConductorAPI, 'get_raid_logical_disk_properties')
def test_raid_logical_disk_properties(self, disk_prop_mock):
driver._RAID_PROPERTIES = {}
self.register_fake_conductors()
properties = {'foo': 'description of foo'}
disk_prop_mock.return_value = properties
path = '/drivers/%s/raid/logical_disk_properties' % self.hw1
data = self.get_json(path,
headers={api_base.Version.string: "1.12"})
self.assertEqual(properties, data)
disk_prop_mock.assert_called_once_with(mock.ANY, self.hw1,
topic=mock.ANY)
@mock.patch.object(rpcapi.ConductorAPI, 'get_raid_logical_disk_properties')
def test_raid_logical_disk_properties_older_version(self, disk_prop_mock):
driver._RAID_PROPERTIES = {}
self.register_fake_conductors()
properties = {'foo': 'description of foo'}
disk_prop_mock.return_value = properties
path = '/drivers/%s/raid/logical_disk_properties' % self.hw1
ret = self.get_json(path,
headers={api_base.Version.string: "1.4"},
expect_errors=True)
self.assertEqual(http_client.NOT_ACCEPTABLE, ret.status_code)
@mock.patch.object(rpcapi.ConductorAPI, 'get_raid_logical_disk_properties')
def test_raid_logical_disk_properties_cached(self, disk_prop_mock):
# only one RPC-conductor call will be made and the info cached
# for subsequent requests
driver._RAID_PROPERTIES = {}
self.register_fake_conductors()
properties = {'foo': 'description of foo'}
disk_prop_mock.return_value = properties
path = '/drivers/%s/raid/logical_disk_properties' % self.hw1
for i in range(3):
data = self.get_json(path,
headers={api_base.Version.string: "1.12"})
self.assertEqual(properties, data)
disk_prop_mock.assert_called_once_with(mock.ANY, self.hw1,
topic=mock.ANY)
self.assertEqual(properties, driver._RAID_PROPERTIES[self.hw1])
@mock.patch.object(rpcapi.ConductorAPI, 'get_raid_logical_disk_properties')
def test_raid_logical_disk_properties_iface_not_supported(
self, disk_prop_mock):
driver._RAID_PROPERTIES = {}
self.register_fake_conductors()
disk_prop_mock.side_effect = exception.UnsupportedDriverExtension(
extension='raid', driver='fake-hardware')
path = '/drivers/%s/raid/logical_disk_properties' % self.hw1
ret = self.get_json(path,
headers={api_base.Version.string: "1.12"},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, ret.status_code)
self.assertTrue(ret.json['error_message'])
disk_prop_mock.assert_called_once_with(mock.ANY, self.hw1,
topic=mock.ANY)
@mock.patch.object(rpcapi.ConductorAPI, 'get_driver_properties')
@mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for_driver')
class TestDriverProperties(base.BaseApiTest):
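    # Note: the class-level mock.patch decorators above are applied
    # bottom-up, so every test method receives the mocks in that order:
    # mock_topic (get_topic_for_driver) first, then mock_properties
    # (get_driver_properties).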
def test_driver_properties_fake(self, mock_topic, mock_properties):
# Can get driver properties for fake driver.
driver._DRIVER_PROPERTIES = {}
driver_name = 'test'
mock_topic.return_value = 'fake_topic'
mock_properties.return_value = {'prop1': 'Property 1. Required.'}
data = self.get_json('/drivers/%s/properties' % driver_name)
self.assertEqual(mock_properties.return_value, data)
mock_topic.assert_called_once_with(driver_name)
mock_properties.assert_called_once_with(mock.ANY, driver_name,
topic=mock_topic.return_value)
self.assertEqual(mock_properties.return_value,
driver._DRIVER_PROPERTIES[driver_name])
def test_driver_properties_hw_type(self, mock_topic, mock_properties):
# Can get driver properties for manual-management hardware type
driver._DRIVER_PROPERTIES = {}
driver_name = 'manual-management'
mock_topic.return_value = 'fake_topic'
mock_properties.return_value = {'prop1': 'Property 1. Required.'}
with mock.patch.object(self.dbapi, 'get_active_hardware_type_dict',
autospec=True) as mock_hw_type:
mock_hw_type.return_value = {driver_name: 'fake_topic'}
data = self.get_json('/drivers/%s/properties' % driver_name)
self.assertEqual(mock_properties.return_value, data)
mock_topic.assert_called_once_with(driver_name)
mock_properties.assert_called_once_with(mock.ANY, driver_name,
topic=mock_topic.return_value)
self.assertEqual(mock_properties.return_value,
driver._DRIVER_PROPERTIES[driver_name])
def test_driver_properties_cached(self, mock_topic, mock_properties):
# only one RPC-conductor call will be made and the info cached
# for subsequent requests
driver._DRIVER_PROPERTIES = {}
driver_name = 'manual-management'
mock_topic.return_value = 'fake_topic'
mock_properties.return_value = {'prop1': 'Property 1. Required.'}
with mock.patch.object(self.dbapi, 'get_active_hardware_type_dict',
autospec=True) as mock_hw_type:
mock_hw_type.return_value = {driver_name: 'fake_topic'}
data = self.get_json('/drivers/%s/properties' % driver_name)
data = self.get_json('/drivers/%s/properties' % driver_name)
data = self.get_json('/drivers/%s/properties' % driver_name)
self.assertEqual(mock_properties.return_value, data)
mock_topic.assert_called_once_with(driver_name)
mock_properties.assert_called_once_with(mock.ANY, driver_name,
topic=mock_topic.return_value)
self.assertEqual(mock_properties.return_value,
driver._DRIVER_PROPERTIES[driver_name])
def test_driver_properties_invalid_driver_name(self, mock_topic,
mock_properties):
# Cannot get driver properties for an invalid driver; no RPC topic
# exists for it.
driver._DRIVER_PROPERTIES = {}
driver_name = 'bad_driver'
mock_topic.side_effect = exception.DriverNotFound(
driver_name=driver_name)
mock_properties.return_value = {'prop1': 'Property 1. Required.'}
ret = self.get_json('/drivers/%s/properties' % driver_name,
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, ret.status_int)
mock_topic.assert_called_once_with(driver_name)
self.assertFalse(mock_properties.called)
def test_driver_properties_cannot_load(self, mock_topic, mock_properties):
# Cannot get driver properties for the driver. Although an RPC topic
# exists for it, the conductor wasn't able to load it.
driver._DRIVER_PROPERTIES = {}
driver_name = 'driver'
mock_topic.return_value = 'driver_topic'
mock_properties.side_effect = exception.DriverNotFound(
driver_name=driver_name)
ret = self.get_json('/drivers/%s/properties' % driver_name,
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, ret.status_int)
mock_topic.assert_called_once_with(driver_name)
mock_properties.assert_called_once_with(mock.ANY, driver_name,
topic=mock_topic.return_value)
| 45.027881
| 79
| 0.623942
|
97d49d77635d16780fe3a592b4f2c1e5959b3d3b
| 12,223
|
py
|
Python
|
chives/cmds/passphrase_funcs.py
|
zcomputerwiz/chives-blockchain
|
73d268bf76f50ff6133c868b58891e75739a2708
|
[
"Apache-2.0"
] | 75
|
2021-06-27T03:30:59.000Z
|
2022-03-20T12:32:55.000Z
|
chives/cmds/passphrase_funcs.py
|
zcomputerwiz/chives-blockchain
|
73d268bf76f50ff6133c868b58891e75739a2708
|
[
"Apache-2.0"
] | 39
|
2021-07-02T07:11:24.000Z
|
2022-03-20T15:00:07.000Z
|
chives/cmds/passphrase_funcs.py
|
zcomputerwiz/chives-blockchain
|
73d268bf76f50ff6133c868b58891e75739a2708
|
[
"Apache-2.0"
] | 41
|
2021-06-24T11:24:43.000Z
|
2022-03-14T16:11:38.000Z
|
import click
import colorama
import os
import sys
from chives.daemon.client import acquire_connection_to_daemon
from chives.util.keychain import Keychain, obtain_current_passphrase, supports_os_passphrase_storage
from chives.util.keyring_wrapper import DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
from chives.util.misc import prompt_yes_no
from chives.util.ws_message import WsRpcMessage
from getpass import getpass
from io import TextIOWrapper
from pathlib import Path
from typing import Optional, Tuple
# Click drops leading dashes, and converts remaining dashes to underscores. e.g. --set-passphrase -> 'set_passphrase'
PASSPHRASE_CLI_OPTION_NAMES = ["keys_root_path", "set_passphrase", "passphrase_file", "current_passphrase_file"]
SAVE_MASTER_PASSPHRASE_WARNING = (
colorama.Fore.YELLOW
+ colorama.Style.BRIGHT
+ "\n!!! SECURITY WARNING !!!\n"
+ colorama.Style.RESET_ALL
+ "Other processes may be able to access your saved passphrase, possibly exposing your private keys.\n"
+ "You should not save your passphrase unless you fully trust your environment.\n"
)
def remove_passphrase_options_from_cmd(cmd) -> None:
"""
    Filters out passphrase options from a given Click command object.
"""
# TODO: Click doesn't seem to have a great way of adding/removing params using an
# existing command, and using the decorator-supported construction of options doesn't
# allow for conditionally including options. Once keyring passphrase management is
# rolled out to all platforms this can be removed.
cmd.params = [param for param in cmd.params if param.name not in PASSPHRASE_CLI_OPTION_NAMES]
def verify_passphrase_meets_requirements(
new_passphrase: str, confirmation_passphrase: str
) -> Tuple[bool, Optional[str]]:
match = new_passphrase == confirmation_passphrase
min_length = Keychain.minimum_passphrase_length()
meets_len_requirement = len(new_passphrase) >= min_length
if match and meets_len_requirement:
return True, None
elif not match:
return False, "Passphrases do not match"
elif not meets_len_requirement:
return False, f"Minimum passphrase length is {min_length}"
else:
raise Exception("Unexpected passphrase verification case")
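# Sketch of the return contract above (hedged; the minimum length comes from
# Keychain.minimum_passphrase_length() and may vary by release):
#   matching passphrases of sufficient length      -> (True, None)
#   verify_passphrase_meets_requirements('a', 'b') -> (False, 'Passphrases do not match')
#   matching but shorter than the minimum          -> (False, 'Minimum passphrase length is <n>')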
def prompt_for_passphrase(prompt: str) -> str:
if sys.platform == "win32" or sys.platform == "cygwin":
print(prompt, end="")
prompt = ""
return getpass(prompt)
def prompt_to_save_passphrase() -> bool:
save: bool = False
try:
if supports_os_passphrase_storage():
location: Optional[str] = None
warning: Optional[str] = None
if sys.platform == "darwin":
location = "macOS Keychain"
warning = SAVE_MASTER_PASSPHRASE_WARNING
elif sys.platform == "win32" or sys.platform == "cygwin":
location = "Windows Credential Manager"
warning = SAVE_MASTER_PASSPHRASE_WARNING
if location is None:
raise ValueError("OS-specific credential store not specified")
print(
"\n"
"Your passphrase can be stored in your system's secure credential store. "
"Other Chives processes will be able to access your keys without prompting for your passphrase."
)
if warning is not None:
colorama.init()
print(warning)
save = prompt_yes_no(f"Would you like to save your passphrase to the {location} (y/n) ")
except Exception as e:
print(f"Caught exception: {e}")
return False
return save
def prompt_for_new_passphrase() -> Tuple[str, bool]:
min_length: int = Keychain.minimum_passphrase_length()
if min_length > 0:
n = min_length
print(f"\nPassphrases must be {n} or more characters in length") # lgtm [py/clear-text-logging-sensitive-data]
while True:
passphrase: str = getpass("New Passphrase: ")
confirmation: str = getpass("Confirm Passphrase: ")
save_passphrase: bool = False
valid_passphrase, error_msg = verify_passphrase_meets_requirements(passphrase, confirmation)
if valid_passphrase:
if supports_os_passphrase_storage():
save_passphrase = prompt_to_save_passphrase()
return passphrase, save_passphrase
elif error_msg:
print(f"{error_msg}\n") # lgtm [py/clear-text-logging-sensitive-data]
def read_passphrase_from_file(passphrase_file: TextIOWrapper) -> str:
passphrase = passphrase_file.read().rstrip(os.environ.get("CHIVES_PASSPHRASE_STRIP_TRAILING_CHARS", "\r\n"))
passphrase_file.close()
return passphrase
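# Usage sketch: with the default strip set ("\r\n"), a passphrase file whose
# contents are "my passphrase\n" yields "my passphrase". Setting the
# CHIVES_PASSPHRASE_STRIP_TRAILING_CHARS environment variable changes which
# trailing characters are removed before the passphrase is returned.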
def initialize_passphrase() -> None:
if Keychain.has_master_passphrase():
print("Keyring is already protected by a passphrase")
print("\nUse 'chives passphrase set' or 'chives passphrase remove' to update or remove your passphrase")
sys.exit(1)
# We'll rely on Keyring initialization to leverage the cached passphrase for
# bootstrapping the keyring encryption process
print("Setting keyring passphrase")
passphrase: Optional[str] = None
# save_passphrase indicates whether the passphrase should be saved in the
# macOS Keychain or Windows Credential Manager
save_passphrase: bool = False
if Keychain.has_cached_passphrase():
passphrase = Keychain.get_cached_master_passphrase()
if not passphrase or passphrase == default_passphrase():
passphrase, save_passphrase = prompt_for_new_passphrase()
Keychain.set_master_passphrase(current_passphrase=None, new_passphrase=passphrase, save_passphrase=save_passphrase)
def set_or_update_passphrase(passphrase: Optional[str], current_passphrase: Optional[str], hint: Optional[str]) -> bool:
# Prompt for the current passphrase, if necessary
if Keychain.has_master_passphrase():
# Try the default passphrase first
if using_default_passphrase():
current_passphrase = default_passphrase()
if not current_passphrase:
try:
current_passphrase = obtain_current_passphrase("Current Passphrase: ")
except Exception as e:
print(f"Unable to confirm current passphrase: {e}")
sys.exit(1)
success: bool = False
new_passphrase: Optional[str] = passphrase
save_passphrase: bool = False
try:
# Prompt for the new passphrase, if necessary
if new_passphrase is None:
new_passphrase, save_passphrase = prompt_for_new_passphrase()
if new_passphrase == current_passphrase:
raise ValueError("passphrase is unchanged")
Keychain.set_master_passphrase(
current_passphrase=current_passphrase,
new_passphrase=new_passphrase,
passphrase_hint=hint,
save_passphrase=save_passphrase,
)
success = True
except Exception as e:
print(f"Unable to set or update passphrase: {e}")
success = False
return success
def remove_passphrase(current_passphrase: Optional[str]) -> bool:
"""
Removes the user's keyring passphrase. The keyring will be re-encrypted to the default passphrase.
"""
success = False
if not Keychain.has_master_passphrase() or using_default_passphrase():
print("Passphrase is not currently set")
success = False
else:
# Try the default passphrase first
if using_default_passphrase():
current_passphrase = default_passphrase()
# Prompt for the current passphrase, if necessary
if not current_passphrase:
try:
current_passphrase = obtain_current_passphrase("Current Passphrase: ")
except Exception as e:
print(f"Unable to confirm current passphrase: {e}")
success = False
if current_passphrase:
try:
Keychain.remove_master_passphrase(current_passphrase)
success = True
except Exception as e:
print(f"Unable to remove passphrase: {e}")
success = False
return success
def cache_passphrase(passphrase: str) -> None:
Keychain.set_cached_master_passphrase(passphrase)
def get_current_passphrase() -> Optional[str]:
if not Keychain.has_master_passphrase():
return None
current_passphrase = None
if using_default_passphrase():
current_passphrase = default_passphrase()
else:
try:
current_passphrase = obtain_current_passphrase()
except Exception as e:
print(f"Unable to confirm current passphrase: {e}")
raise e
return current_passphrase
def default_passphrase() -> str:
return DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
def using_default_passphrase() -> bool:
if not Keychain.has_master_passphrase():
return False
return Keychain.master_passphrase_is_valid(default_passphrase())
def display_passphrase_hint() -> None:
passphrase_hint = Keychain.get_master_passphrase_hint()
if passphrase_hint is not None:
print(f"Passphrase hint: {passphrase_hint}") # lgtm [py/clear-text-logging-sensitive-data]
else:
print("Passphrase hint is not set")
def update_passphrase_hint(hint: Optional[str] = None) -> bool:
updated: bool = False
if Keychain.has_master_passphrase() is False or using_default_passphrase():
print("Updating the passphrase hint requires that a passphrase has been set")
else:
current_passphrase: Optional[str] = get_current_passphrase()
if current_passphrase is None:
print("Keyring is not passphrase-protected")
else:
# Set or remove the passphrase hint
Keychain.set_master_passphrase_hint(current_passphrase, hint)
updated = True
return updated
def set_passphrase_hint(hint: str) -> None:
if update_passphrase_hint(hint):
print("Passphrase hint set")
else:
print("Passphrase hint was not updated")
def remove_passphrase_hint() -> None:
if update_passphrase_hint(None):
print("Passphrase hint removed")
else:
print("Passphrase hint was not removed")
async def async_update_daemon_passphrase_cache_if_running(root_path: Path) -> None:
"""
Attempt to connect to the daemon and update the cached passphrase
"""
new_passphrase = Keychain.get_cached_master_passphrase()
assert new_passphrase is not None
try:
async with acquire_connection_to_daemon(root_path, quiet=True) as daemon:
if daemon is not None:
response = await daemon.unlock_keyring(new_passphrase)
if response is None:
raise Exception("daemon didn't respond")
success: bool = response.get("data", {}).get("success", False)
if success is False:
error = response.get("data", {}).get("error", "unknown error")
raise Exception(error)
except Exception as e:
print(f"Failed to notify daemon of updated keyring passphrase: {e}")
async def async_update_daemon_migration_completed_if_running() -> None:
"""
Attempt to connect to the daemon to notify that keyring migration has completed.
This allows the daemon to refresh its keyring so that it can stop using the
legacy keyring.
"""
ctx: click.Context = click.get_current_context()
root_path: Path = ctx.obj["root_path"]
if root_path is None:
print("Missing root_path in context. Unable to notify daemon")
return None
async with acquire_connection_to_daemon(root_path, quiet=True) as daemon:
if daemon is not None:
passphrase: str = Keychain.get_cached_master_passphrase()
print("Updating daemon... ", end="")
response: WsRpcMessage = await daemon.notify_keyring_migration_completed(passphrase)
success: bool = response.get("data", {}).get("success", False)
print("succeeded" if success is True else "failed")
| 36.377976
| 120
| 0.681338
|
edb74dda031c12c52bddafbd203becff985c047e
| 2,905
|
py
|
Python
|
release/stubs.min/System/Net/__init___parts/OpenWriteCompletedEventHandler.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/System/Net/__init___parts/OpenWriteCompletedEventHandler.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/System/Net/__init___parts/OpenWriteCompletedEventHandler.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class OpenWriteCompletedEventHandler(MulticastDelegate,ICloneable,ISerializable):
"""
Represents the method that will handle the System.Net.WebClient.OpenWriteCompleted event of a System.Net.WebClient.
OpenWriteCompletedEventHandler(object: object,method: IntPtr)
"""
def BeginInvoke(self,sender,e,callback,object):
""" BeginInvoke(self: OpenWriteCompletedEventHandler,sender: object,e: OpenWriteCompletedEventArgs,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self,*args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self,*args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current
delegate.-or- null,if the method represented by the current delegate does not require
arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self,result):
""" EndInvoke(self: OpenWriteCompletedEventHandler,result: IAsyncResult) """
pass
def GetMethodImpl(self,*args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self,sender,e):
""" Invoke(self: OpenWriteCompletedEventHandler,sender: object,e: OpenWriteCompletedEventArgs) """
pass
def RemoveImpl(self,*args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to
the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without
value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,object,method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self,*args):
pass
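# Usage sketch (hedged; names are placeholders and the wiring shown is the
# usual IronPython .NET event pattern, not something specific to this stub):
#   def on_open_write_completed(sender, e):
#       ...  # e: OpenWriteCompletedEventArgs
#   web_client.OpenWriteCompleted += on_open_write_completed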
| 30.578947
| 215
| 0.723236
|
cc68f78ef97c96240df40aef807b0d575995cfbb
| 2,515
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/containerservice/v20200201/list_managed_cluster_admin_credentials.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/containerservice/v20200201/list_managed_cluster_admin_credentials.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/containerservice/v20200201/list_managed_cluster_admin_credentials.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListManagedClusterAdminCredentialsResult',
'AwaitableListManagedClusterAdminCredentialsResult',
'list_managed_cluster_admin_credentials',
]
@pulumi.output_type
class ListManagedClusterAdminCredentialsResult:
"""
The list of credential result response.
"""
def __init__(__self__, kubeconfigs=None):
if kubeconfigs and not isinstance(kubeconfigs, list):
raise TypeError("Expected argument 'kubeconfigs' to be a list")
pulumi.set(__self__, "kubeconfigs", kubeconfigs)
@property
@pulumi.getter
def kubeconfigs(self) -> Sequence['outputs.CredentialResultResponseResult']:
"""
Base64-encoded Kubernetes configuration file.
"""
return pulumi.get(self, "kubeconfigs")
class AwaitableListManagedClusterAdminCredentialsResult(ListManagedClusterAdminCredentialsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListManagedClusterAdminCredentialsResult(
kubeconfigs=self.kubeconfigs)
def list_managed_cluster_admin_credentials(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListManagedClusterAdminCredentialsResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str resource_name: The name of the managed cluster resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:containerservice/v20200201:listManagedClusterAdminCredentials', __args__, opts=opts, typ=ListManagedClusterAdminCredentialsResult).value
return AwaitableListManagedClusterAdminCredentialsResult(
kubeconfigs=__ret__.kubeconfigs)
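# Usage sketch (hedged; the resource names below are placeholders, not real
# Azure resources):
#   creds = list_managed_cluster_admin_credentials(
#       resource_group_name='example-rg',
#       resource_name='example-aks-cluster')
#   for entry in creds.kubeconfigs:
#       ...  # each entry carries a base64-encoded kubeconfig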
| 38.106061
| 187
| 0.716103
|
9459fc97d6d704861f68ff2cbecb32c5b6e0e84a
| 48,801
|
py
|
Python
|
venv/Lib/site-packages/PyQt4/examples/tools/i18n/i18n_rc.py
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/PyQt4/examples/tools/i18n/i18n_rc.py
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/PyQt4/examples/tools/i18n/i18n_rc.py
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sat 2. Mar 10:35:48 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x03\x4c\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x3d\x00\x4d\x09\xa4\x00\x00\x00\x58\x00\x5a\xf0\x84\
\x00\x00\x00\x71\x02\xf0\x8c\x31\x00\x00\x00\x8e\x05\x93\x08\xe5\
\x00\x00\x00\xaf\x05\x9b\xa6\x44\x00\x00\x00\xc8\x06\x3c\xe8\x53\
\x00\x00\x00\xe3\x06\xec\x79\x65\x00\x00\x01\x04\x0c\x4e\x30\xd8\
\x00\x00\x01\x25\x0e\x9f\xe7\x05\x00\x00\x01\x40\x69\x00\x00\x01\
\x87\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x00\x56\x00\x75\x00\x65\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x10\x00\x26\x00\x46\x00\x69\x00\x63\
\x00\x68\x00\x69\x00\x65\x00\x72\x05\x00\x2a\xd0\x25\x01\x03\x00\
\x00\x00\x10\x00\x26\x00\x51\x00\x75\x00\x69\x00\x74\x00\x74\x00\
\x65\x00\x72\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0e\x00\x50\
\x00\x72\x00\x65\x00\x6d\x00\x69\x00\x65\x00\x72\x05\x00\x4d\x09\
\xa4\x01\x03\x00\x00\x00\x12\x00\x54\x00\x72\x00\x6f\x00\x69\x00\
\x73\x00\x69\x00\xe8\x00\x6d\x00\x65\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x16\x00\x4c\x00\x61\x00\x6e\x00\x67\x00\x75\x00\x65\
\x00\x20\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\
\x75\x00\x65\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x10\x00\x44\
\x00\x65\x00\x75\x00\x78\x00\x69\x00\xe8\x00\x6d\x00\x65\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\
\x6d\x00\xe9\x00\x74\x00\x72\x00\x69\x00\x71\x00\x75\x00\x65\x05\
\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\x00\x65\x00\x72\
\x00\x73\x00\x70\x00\x65\x00\x63\x00\x74\x00\x69\x00\x76\x00\x65\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x10\x00\x46\x00\x72\x00\
\x61\x00\x6e\x00\xe7\x00\x61\x00\x69\x00\x73\x05\x0c\x4e\x30\xd8\
\x01\x03\x00\x00\x00\x3c\x00\x45\x00\x78\x00\x65\x00\x6d\x00\x70\
\x00\x6c\x00\x65\x00\x20\x00\x64\x00\x27\x00\x69\x00\x6e\x00\x74\
\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x61\x00\x74\x00\x69\x00\x6f\
\x00\x6e\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xb2\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x20\x00\x47\xdf\x04\
\x00\x00\x00\x33\x00\x4d\x09\xa4\x00\x00\x00\x46\x00\x5a\xf0\x84\
\x00\x00\x00\x57\x02\xf0\x8c\x31\x00\x00\x00\x68\x05\x93\x08\xe5\
\x00\x00\x00\x81\x05\x9b\xa6\x44\x00\x00\x00\x90\x06\x3c\xe8\x53\
\x00\x00\x00\xa1\x06\xec\x79\x65\x00\x00\x00\xb2\x0c\x4e\x30\xd8\
\x00\x00\x00\xc5\x0e\x9f\xe7\x05\x00\x00\x00\xd6\x69\x00\x00\x00\
\xed\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x04\xbc\xf4\xae\x30\x05\x00\x05\xcf\xc7\
\x01\x03\x00\x00\x00\x08\xd3\x0c\xc7\x7c\x00\x26\x00\x46\x05\x00\
\x2a\xd0\x25\x01\x03\x00\x00\x00\x08\xc8\x85\xb8\xcc\x00\x26\x00\
\x58\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x06\xcc\xab\xbc\x88\
\xc9\xf8\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x06\xc1\x38\xbc\
\x88\xc9\xf8\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x0e\xc5\xb8\
\xc5\xb4\x00\x20\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x04\xbe\x57\xac\x01\x05\x05\x93\x08\xe5\
\x01\x03\x00\x00\x00\x06\xb4\x50\xbc\x88\xc9\xf8\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x06\xb4\xf1\xce\x21\xb3\xc4\x05\x06\x3c\
\xe8\x53\x01\x03\x00\x00\x00\x08\xc6\xd0\xad\xfc\xd6\x54\xbc\x95\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x06\xd5\x5c\xad\x6d\xc5\
\xb4\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x0c\xad\x6d\xc8\x1c\
\xd6\x54\x00\x20\xc6\x08\xc8\x1c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\
\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\
\x00\
\x00\x00\x03\x26\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x35\x00\x4d\x09\xa4\x00\x00\x00\x4a\x00\x5a\xf0\x84\
\x00\x00\x00\x61\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x93\x05\x9b\xa6\x44\x00\x00\x00\xaa\x06\x3c\xe8\x53\
\x00\x00\x00\xc1\x06\xec\x79\x65\x00\x00\x00\xe8\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x22\x69\x00\x00\x01\
\x61\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x04\x12\x04\x38\x04\x34\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x08\x04\x24\x04\x30\x04\x39\x04\x3b\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x04\x12\x04\x4b\x04\
\x45\x04\x3e\x04\x34\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0c\
\x04\x1f\x04\x35\x04\x40\x04\x32\x04\x4b\x04\x39\x05\x00\x4d\x09\
\xa4\x01\x03\x00\x00\x00\x0c\x04\x22\x04\x40\x04\x35\x04\x42\x04\
\x38\x04\x39\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x10\x04\x2f\
\x04\x37\x04\x4b\x04\x3a\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\
\xf0\x8c\x31\x01\x03\x00\x00\x00\x0c\x04\x1a\x04\x43\x04\x40\x04\
\x41\x04\x38\x04\x32\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0c\
\x04\x12\x04\x42\x04\x3e\x04\x40\x04\x3e\x04\x39\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x1c\x04\x18\x04\x37\x04\x3e\x04\x3c\x04\
\x35\x04\x42\x04\x40\x04\x38\x04\x47\x04\x35\x04\x41\x04\x3a\x04\
\x38\x04\x39\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x04\x1f\
\x04\x35\x04\x40\x04\x41\x04\x3f\x04\x35\x04\x3a\x04\x42\x04\x38\
\x04\x32\x04\x30\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0e\x04\
\x20\x04\x43\x04\x41\x04\x41\x04\x3a\x04\x38\x04\x39\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x34\x04\x1f\x04\x40\x04\x38\x04\x3c\
\x04\x35\x04\x40\x00\x20\x04\x38\x04\x3d\x04\x42\x04\x35\x04\x40\
\x04\x3d\x04\x30\x04\x46\x04\x38\x04\x3d\x04\x3e\x04\x30\x04\x3b\
\x04\x38\x04\x37\x04\x30\x04\x46\x04\x38\x04\x38\x05\x0e\x9f\xe7\
\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\
\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x2e\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x56\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x84\x05\x93\x08\xe5\
\x00\x00\x00\xa1\x05\x9b\xa6\x44\x00\x00\x00\xb6\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xec\x0c\x4e\x30\xd8\
\x00\x00\x01\x0d\x0e\x9f\xe7\x05\x00\x00\x01\x26\x69\x00\x00\x01\
\x69\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x00\x56\x00\x69\x00\x73\x00\x61\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0c\x00\x26\x00\x41\x00\x72\
\x00\x6b\x00\x69\x00\x76\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x10\x00\x26\x00\x41\x00\x76\x00\x73\x00\x6c\x00\x75\x00\x74\x00\
\x61\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0c\x00\x46\x00\xf6\
\x00\x72\x00\x73\x00\x74\x00\x61\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x0c\x00\x54\x00\x72\x00\x65\x00\x64\x00\x6a\x00\x65\x05\
\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x12\x00\x53\x00\x70\x00\x72\
\x00\xe5\x00\x6b\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x0a\x00\x53\x00\x6b\x00\x65\x00\x76\x00\
\x74\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0a\x00\x41\x00\x6e\
\x00\x64\x00\x72\x00\x61\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x16\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x73\x00\x6b\x00\x74\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x16\x00\x50\x00\x65\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\
\x00\x74\x00\x69\x00\x76\x00\x74\x05\x06\xec\x79\x65\x01\x03\x00\
\x00\x00\x0e\x00\x53\x00\x76\x00\x65\x00\x6e\x00\x73\x00\x6b\x00\
\x61\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x38\x00\x49\x00\x6e\
\x00\x74\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\
\x00\x6e\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x65\x00\x72\x00\x69\
\x00\x6e\x00\x67\x00\x73\x00\x65\x00\x78\x00\x65\x00\x6d\x00\x70\
\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\
\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x50\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x2a\x00\x47\xdf\x04\
\x00\x00\x00\x41\x00\x4d\x09\xa4\x00\x00\x00\x5c\x00\x5a\xf0\x84\
\x00\x00\x00\x75\x02\xf0\x8c\x31\x00\x00\x00\x90\x05\x93\x08\xe5\
\x00\x00\x00\xb1\x05\x9b\xa6\x44\x00\x00\x00\xc8\x06\x3c\xe8\x53\
\x00\x00\x00\xe3\x06\xec\x79\x65\x00\x00\x01\x04\x0c\x4e\x30\xd8\
\x00\x00\x01\x2b\x0e\x9f\xe7\x05\x00\x00\x01\x44\x69\x00\x00\x01\
\x8b\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0e\x00\x41\x00\x6e\x00\x73\x00\x69\x00\
\x63\x00\x68\x00\x74\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0c\
\x00\x26\x00\x44\x00\x61\x00\x74\x00\x65\x00\x69\x05\x00\x2a\xd0\
\x25\x01\x03\x00\x00\x00\x10\x00\x42\x00\x65\x00\x26\x00\x65\x00\
\x6e\x00\x64\x00\x65\x00\x6e\x05\x00\x47\xdf\x04\x01\x03\x00\x00\
\x00\x0e\x00\x45\x00\x72\x00\x73\x00\x74\x00\x65\x00\x6e\x00\x73\
\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x10\x00\x44\x00\x72\x00\
\x69\x00\x74\x00\x74\x00\x65\x00\x6e\x00\x73\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x16\x00\x53\x00\x70\x00\x72\x00\x61\x00\x63\
\x00\x68\x00\x65\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x0c\x00\x53\x00\x63\x00\x68\x00\x69\x00\
\x65\x00\x66\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x10\x00\x5a\
\x00\x77\x00\x65\x00\x69\x00\x74\x00\x65\x00\x6e\x00\x73\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\
\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\x73\x00\x63\x00\x68\x05\
\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x1c\x00\x50\x00\x65\x00\x72\
\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\x00\x69\
\x00\x73\x00\x63\x00\x68\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x0e\x00\x44\x00\x65\x00\x75\x00\x74\x00\x73\x00\x63\x00\x68\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x3c\x00\x49\x00\x6e\x00\x74\
\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x69\x00\x65\x00\x72\x00\x75\
\x00\x6e\x00\x67\x00\x73\x00\x62\x00\x65\x00\x69\x00\x73\x00\x70\
\x00\x69\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\
\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xbc\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x20\x00\x47\xdf\x04\
\x00\x00\x00\x37\x00\x4d\x09\xa4\x00\x00\x00\x4e\x00\x5a\xf0\x84\
\x00\x00\x00\x5f\x02\xf0\x8c\x31\x00\x00\x00\x70\x05\x93\x08\xe5\
\x00\x00\x00\x87\x05\x9b\xa6\x44\x00\x00\x00\x98\x06\x3c\xe8\x53\
\x00\x00\x00\xa9\x06\xec\x79\x65\x00\x00\x00\xbc\x0c\x4e\x30\xd8\
\x00\x00\x00\xcf\x0e\x9f\xe7\x05\x00\x00\x00\xe2\x69\x00\x00\x00\
\xf7\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x04\x89\xc6\x56\xfe\x05\x00\x05\xcf\xc7\
\x01\x03\x00\x00\x00\x0c\x65\x87\x4e\xf6\x00\x5b\x00\x26\x00\x46\
\x00\x5d\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0c\x90\x00\x51\
\xfa\x00\x5b\x00\x26\x00\x78\x00\x5d\x05\x00\x47\xdf\x04\x01\x03\
\x00\x00\x00\x06\x7b\x2c\x4e\x00\x4e\x2a\x05\x00\x4d\x09\xa4\x01\
\x03\x00\x00\x00\x06\x7b\x2c\x4e\x09\x4e\x2a\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x0c\x8b\xed\x8a\x00\x00\x3a\x00\x20\x00\x25\
\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\x06\x65\x9c\x62\
\x95\x5f\x71\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x7b\x2c\
\x4e\x8c\x4e\x2a\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x08\x7b\
\x49\x89\xd2\x62\x95\x5f\x71\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x08\x90\x0f\x89\xc6\x62\x95\x5f\x71\x05\x06\xec\x79\x65\x01\
\x03\x00\x00\x00\x08\x7b\x80\x4f\x53\x4e\x2d\x65\x87\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x0a\x56\xfd\x96\x45\x53\x16\x83\x03\
\x4f\x8b\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xe0\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x39\x00\x4d\x09\xa4\x00\x00\x00\x4c\x00\x5a\xf0\x84\
\x00\x00\x00\x5d\x02\xf0\x8c\x31\x00\x00\x00\x70\x05\x93\x08\xe5\
\x00\x00\x00\x8d\x05\x9b\xa6\x44\x00\x00\x00\xa0\x06\x3c\xe8\x53\
\x00\x00\x00\xb3\x06\xec\x79\x65\x00\x00\x00\xca\x0c\x4e\x30\xd8\
\x00\x00\x00\xdf\x0e\x9f\xe7\x05\x00\x00\x00\xf8\x69\x00\x00\x01\
\x1b\x03\x00\x00\x00\x06\x00\x52\x00\x54\x00\x4c\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x06\x45\x06\x31\x06\x26\x06\x49\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x06\x27\x06\x44\x06\x45\
\x06\x44\x06\x41\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x08\x06\
\x23\x06\x2e\x06\x31\x06\x2c\x05\x00\x47\xdf\x04\x01\x03\x00\x00\
\x00\x06\x06\x23\x06\x48\x06\x44\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x08\x06\x2b\x06\x27\x06\x44\x06\x2b\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x12\x06\x27\x06\x44\x06\x44\x06\x3a\x06\x29\
\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\
\x00\x00\x08\x06\x45\x06\x35\x06\x45\x06\x2a\x05\x05\x93\x08\xe5\
\x01\x03\x00\x00\x00\x08\x06\x2b\x06\x27\x06\x46\x06\x49\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x0c\x06\x45\x06\x2a\x06\x45\x06\
\x27\x06\x2b\x06\x44\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x0a\
\x06\x45\x06\x46\x06\x38\x06\x48\x06\x31\x05\x06\xec\x79\x65\x01\
\x03\x00\x00\x00\x0e\x06\x27\x06\x44\x06\x39\x06\x31\x06\x28\x06\
\x4a\x06\x29\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x18\x06\x45\
\x06\x2b\x06\x27\x06\x44\x00\x20\x06\x27\x06\x44\x06\x2a\x06\x2f\
\x06\x48\x06\x4a\x06\x44\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\
\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x1c\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x28\x00\x47\xdf\x04\
\x00\x00\x00\x41\x00\x4d\x09\xa4\x00\x00\x00\x58\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x82\x05\x93\x08\xe5\
\x00\x00\x00\x9f\x05\x9b\xa6\x44\x00\x00\x00\xbc\x06\x3c\xe8\x53\
\x00\x00\x00\xd1\x06\xec\x79\x65\x00\x00\x00\xf2\x0c\x4e\x30\xd8\
\x00\x00\x01\x15\x0e\x9f\xe7\x05\x00\x00\x01\x2a\x69\x00\x00\x01\
\x57\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0c\x00\x50\x00\x6f\x00\x68\x00\x6c\x00\
\x65\x00\x64\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0e\x00\x26\
\x00\x53\x00\x6f\x00\x75\x00\x62\x00\x6f\x00\x72\x05\x00\x2a\xd0\
\x25\x01\x03\x00\x00\x00\x0c\x00\x26\x00\x4b\x00\x6f\x00\x6e\x00\
\x65\x00\x63\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0a\x00\x50\
\x00\x72\x00\x76\x00\x6e\x00\xed\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x0a\x00\x54\x01\x59\x00\x65\x00\x74\x00\xed\x05\x00\x5a\
\xf0\x84\x01\x03\x00\x00\x00\x12\x00\x4a\x00\x61\x00\x79\x00\x7a\
\x00\x6b\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x12\x00\x4e\x00\x61\x00\x6b\x00\x6c\x00\x6f\x00\
\x6e\x01\x1b\x00\x6e\x00\xfd\x05\x05\x93\x08\xe5\x01\x03\x00\x00\
\x00\x0a\x00\x44\x00\x72\x00\x75\x00\x68\x00\xfd\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\
\x65\x00\x74\x00\x72\x00\x69\x00\x63\x00\x6b\x00\xfd\x05\x06\x3c\
\xe8\x53\x01\x03\x00\x00\x00\x18\x00\x50\x00\x65\x00\x72\x00\x73\
\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\x00\x6e\x00\xed\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0a\x01\x0c\x00\x65\x00\
\x73\x00\x6b\x00\xfd\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x22\
\x00\x55\x00\x6b\x00\xe1\x00\x7a\x00\x6b\x00\x61\x00\x20\x00\x6c\
\x00\x6f\x00\x6b\x00\x61\x00\x6c\x00\x69\x00\x7a\x00\x61\x00\x63\
\x00\x65\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x28\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x26\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x50\x00\x5a\xf0\x84\
\x00\x00\x00\x65\x02\xf0\x8c\x31\x00\x00\x00\x7a\x05\x93\x08\xe5\
\x00\x00\x00\x99\x05\x9b\xa6\x44\x00\x00\x00\xb2\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xea\x0c\x4e\x30\xd8\
\x00\x00\x01\x0b\x0e\x9f\xe7\x05\x00\x00\x01\x26\x69\x00\x00\x01\
\x63\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0a\x00\x56\x00\x69\x00\x73\x00\x74\x00\
\x61\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\
\x00\x69\x00\x6c\x00\x65\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x0a\x00\x26\x00\x45\x00\x73\x00\x63\x00\x69\x05\x00\x47\xdf\x04\
\x01\x03\x00\x00\x00\x0a\x00\x50\x00\x72\x00\x69\x00\x6d\x00\x6f\
\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x00\x54\x00\x65\x00\
\x72\x00\x7a\x00\x6f\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x14\
\x00\x4c\x00\x69\x00\x6e\x00\x67\x00\x75\x00\x61\x00\x3a\x00\x20\
\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\x0e\x00\
\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\x75\x00\x61\x05\x05\x93\
\x08\xe5\x01\x03\x00\x00\x00\x0e\x00\x53\x00\x65\x00\x63\x00\x6f\
\x00\x6e\x00\x64\x00\x6f\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x14\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x63\x00\x61\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\
\x00\x50\x00\x72\x00\x6f\x00\x73\x00\x70\x00\x65\x00\x74\x00\x74\
\x00\x69\x00\x63\x00\x61\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x10\x00\x49\x00\x74\x00\x61\x00\x6c\x00\x69\x00\x61\x00\x6e\x00\
\x6f\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x32\x00\x45\x00\x73\
\x00\x65\x00\x6d\x00\x70\x00\x69\x00\x6f\x00\x20\x00\x64\x00\x69\
\x00\x20\x00\x6c\x00\x6f\x00\x63\x00\x61\x00\x6c\x00\x69\x00\x7a\
\x00\x7a\x00\x61\x00\x7a\x00\x69\x00\x6f\x00\x6e\x00\x65\x05\x0e\
\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\
\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x24\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x35\x00\x4d\x09\xa4\x00\x00\x00\x50\x00\x5a\xf0\x84\
\x00\x00\x00\x67\x02\xf0\x8c\x31\x00\x00\x00\x7e\x05\x93\x08\xe5\
\x00\x00\x00\x9b\x05\x9b\xa6\x44\x00\x00\x00\xb2\x06\x3c\xe8\x53\
\x00\x00\x00\xc7\x06\xec\x79\x65\x00\x00\x00\xe6\x0c\x4e\x30\xd8\
\x00\x00\x01\x05\x0e\x9f\xe7\x05\x00\x00\x01\x1a\x69\x00\x00\x01\
\x5f\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x00\x56\x00\x69\x00\x73\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x08\x00\x26\x00\x46\x00\x69\x00\x6c\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x10\x00\x26\x00\x41\x00\
\x76\x00\x73\x00\x6c\x00\x75\x00\x74\x00\x74\x05\x00\x47\xdf\x04\
\x01\x03\x00\x00\x00\x0c\x00\x46\x00\xf8\x00\x72\x00\x73\x00\x74\
\x00\x65\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0c\x00\x54\x00\
\x72\x00\x65\x00\x64\x00\x6a\x00\x65\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x12\x00\x53\x00\x70\x00\x72\x00\xe5\x00\x6b\x00\x3a\
\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\
\x0c\x00\x53\x00\x6b\x00\x6a\x00\x65\x00\x76\x00\x74\x05\x05\x93\
\x08\xe5\x01\x03\x00\x00\x00\x0a\x00\x41\x00\x6e\x00\x64\x00\x72\
\x00\x65\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x14\x00\x49\x00\
\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\x73\x00\
\x6b\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x14\x00\x50\x00\x65\
\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0a\x00\x4e\x00\x6f\x00\
\x72\x00\x73\x00\x6b\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x3a\
\x00\x49\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x73\
\x00\x6a\x00\x6f\x00\x6e\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x65\
\x00\x72\x00\x69\x00\x6e\x00\x67\x00\x73\x00\x65\x00\x6b\x00\x73\
\x00\x65\x00\x6d\x00\x70\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\
\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\
\x6f\x77\x00\
\x00\x00\x03\x24\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x54\x00\x5a\xf0\x84\
\x00\x00\x00\x69\x02\xf0\x8c\x31\x00\x00\x00\x7e\x05\x93\x08\xe5\
\x00\x00\x00\x9d\x05\x9b\xa6\x44\x00\x00\x00\xb4\x06\x3c\xe8\x53\
\x00\x00\x00\xcd\x06\xec\x79\x65\x00\x00\x00\xec\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x24\x69\x00\x00\x01\
\x5f\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x03\x8c\x03\xc8\x03\xb7\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x0e\x00\x26\x03\x91\x03\xc1\x03\xc7\
\x03\xb5\x03\xaf\x03\xbf\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x0e\x03\x88\x00\x26\x03\xbe\x03\xbf\x03\xb4\x03\xbf\x03\xc2\x05\
\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0a\x03\xa0\x03\xc1\x03\xce\
\x03\xc4\x03\xbf\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x03\
\xa4\x03\xc1\x03\xaf\x03\xc4\x03\xbf\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x14\x03\x93\x03\xbb\x03\xce\x03\xc3\x03\xc3\x03\xb1\
\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\
\x00\x00\x0c\x03\xa0\x03\xbb\x03\xac\x03\xb3\x03\xb9\x03\xb1\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0e\x03\x94\x03\xb5\x03\xcd\
\x03\xc4\x03\xb5\x03\xc1\x03\xbf\x05\x05\x9b\xa6\x44\x01\x03\x00\
\x00\x00\x14\x03\x99\x03\xc3\x03\xbf\x03\xbc\x03\xb5\x03\xc4\x03\
\xc1\x03\xb9\x03\xba\x03\xae\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x12\x03\xa0\x03\xc1\x03\xbf\x03\xbf\x03\xc0\x03\xc4\x03\xb9\
\x03\xba\x03\xae\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x10\x03\
\x95\x03\xbb\x03\xbb\x03\xb7\x03\xbd\x03\xb9\x03\xba\x03\xac\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x30\x03\xa0\x03\xb1\x03\xc1\
\x03\xac\x03\xb4\x03\xb5\x03\xb9\x03\xb3\x03\xbc\x03\xb1\x00\x20\
\x03\xb4\x03\xb9\x03\xb5\x03\xb8\x03\xbd\x03\xbf\x03\xc0\x03\xbf\
\x03\xaf\x03\xb7\x03\xc3\x03\xb7\x03\xc2\x05\x0e\x9f\xe7\x05\x01\
\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\
\x6f\x77\x00\
\x00\x00\x03\x26\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x2a\x00\x47\xdf\x04\
\x00\x00\x00\x45\x00\x4d\x09\xa4\x00\x00\x00\x5a\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x80\x05\x93\x08\xe5\
\x00\x00\x00\x9f\x05\x9b\xa6\x44\x00\x00\x00\xb8\x06\x3c\xe8\x53\
\x00\x00\x00\xc9\x06\xec\x79\x65\x00\x00\x00\xe6\x0c\x4e\x30\xd8\
\x00\x00\x01\x07\x0e\x9f\xe7\x05\x00\x00\x01\x24\x69\x00\x00\x01\
\x61\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0e\x00\x41\x00\x73\x00\x70\x00\x65\x00\
\x6b\x00\x74\x00\x6f\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x10\
\x00\x26\x00\x44\x00\x6f\x00\x73\x00\x69\x00\x65\x00\x72\x00\x6f\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\x00\
\x69\x00\x6e\x00\x69\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x08\
\x00\x55\x00\x6e\x00\x75\x00\x65\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x08\x00\x54\x00\x72\x00\x69\x00\x65\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x14\x00\x4c\x00\x69\x00\x6e\x00\x67\x00\x76\
\x00\x6f\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x6b\x00\
\x76\x00\x61\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x00\x44\
\x00\x75\x00\x65\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x12\x00\
\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\
\x61\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\x00\x65\
\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\
\x00\x61\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x12\x00\x45\x00\
\x73\x00\x70\x00\x65\x00\x72\x00\x61\x00\x6e\x00\x74\x00\x6f\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x32\x00\x45\x00\x6b\x00\x7a\
\x00\x65\x00\x6d\x00\x70\x00\x6c\x00\x6f\x00\x20\x00\x70\x00\x72\
\x00\x69\x00\x20\x00\x69\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x6e\
\x00\x61\x00\x63\x00\x69\x00\x69\x00\x67\x00\x6f\x05\x0e\x9f\xe7\
\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\
\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x2a\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x39\x00\x4d\x09\xa4\x00\x00\x00\x4e\x00\x5a\xf0\x84\
\x00\x00\x00\x63\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x9b\x05\x9b\xa6\x44\x00\x00\x00\xb4\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xe8\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x22\x69\x00\x00\x01\
\x65\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x00\x56\x00\x69\x00\x65\x00\x77\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\x00\x69\
\x00\x6c\x00\x65\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x00\
\x45\x00\x26\x00\x78\x00\x69\x00\x74\x05\x00\x47\xdf\x04\x01\x03\
\x00\x00\x00\x0a\x00\x46\x00\x69\x00\x72\x00\x73\x00\x74\x05\x00\
\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x00\x54\x00\x68\x00\x69\x00\
\x72\x00\x64\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x18\x00\x4c\
\x00\x61\x00\x6e\x00\x67\x00\x75\x00\x61\x00\x67\x00\x65\x00\x3a\
\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\
\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\x75\x00\x65\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0c\x00\x53\x00\x65\x00\x63\
\x00\x6f\x00\x6e\x00\x64\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x12\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x63\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\
\x00\x65\x00\x72\x00\x73\x00\x70\x00\x65\x00\x63\x00\x74\x00\x69\
\x00\x76\x00\x65\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0e\x00\
\x45\x00\x6e\x00\x67\x00\x6c\x00\x69\x00\x73\x00\x68\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x38\x00\x49\x00\x6e\x00\x74\x00\x65\
\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x61\
\x00\x6c\x00\x69\x00\x7a\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x20\x00\x45\x00\x78\x00\x61\x00\x6d\x00\x70\x00\x6c\x00\x65\
\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\
\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xd2\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x3f\x00\x4d\x09\xa4\x00\x00\x00\x56\x00\x5a\xf0\x84\
\x00\x00\x00\x67\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x8f\x05\x9b\xa6\x44\x00\x00\x00\xa4\x06\x3c\xe8\x53\
\x00\x00\x00\xb5\x06\xec\x79\x65\x00\x00\x00\xca\x0c\x4e\x30\xd8\
\x00\x00\x00\xdb\x0e\x9f\xe7\x05\x00\x00\x00\xec\x69\x00\x00\x01\
\x0d\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x88\x68\x79\x3a\x65\xb9\x5f\x0f\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x10\x30\xd5\x30\xa1\x30\xa4\
\x30\xeb\x00\x28\x00\x26\x00\x46\x00\x29\x05\x00\x2a\xd0\x25\x01\
\x03\x00\x00\x00\x0c\x7d\x42\x4e\x86\x00\x28\x00\x26\x00\x58\x00\
\x29\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\x00\
\x88\x4c\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\
\x09\x88\x4c\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x0c\x8a\x00\
\x8a\x9e\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0a\x65\x9c\x30\x81\x62\x95\x5f\x71\x6c\xd5\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\x8c\x88\x4c\
\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x0a\x7b\x49\x89\xd2\x62\
\x95\x5f\x71\x6c\xd5\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x06\
\x90\x60\x8f\xd1\x6c\xd5\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x06\x65\xe5\x67\x2c\x8a\x9e\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\
\x00\x16\x56\xfd\x96\x9b\x53\x16\x00\x28\x00\x69\x00\x31\x00\x38\
\x00\x6e\x00\x29\x30\x6e\x4f\x8b\x05\x0e\x9f\xe7\x05\x01\x2f\x00\
\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\
\x00\
"
qt_resource_name = "\
\x00\x0c\
\x0d\xfc\x11\x13\
\x00\x74\
\x00\x72\x00\x61\x00\x6e\x00\x73\x00\x6c\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\
\x00\x0a\
\x04\x50\xdc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x66\x00\x72\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6f\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6b\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x65\x0c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x72\x00\x75\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x67\x1c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x73\x00\x76\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x58\x0c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x64\x00\x65\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x7d\x3c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x7a\x00\x68\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x55\xdc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x57\xec\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x63\x00\x73\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6d\xfc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x69\x00\x74\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x68\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6e\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x56\x7c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6c\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x59\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x59\x9c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6e\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6c\xbc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6a\x00\x70\x00\x2e\x00\x71\x00\x6d\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0e\x00\x00\x00\x02\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xba\x00\x00\x00\x00\x00\x01\x00\x00\x12\x76\
\x00\x00\x01\x22\x00\x00\x00\x00\x00\x01\x00\x00\x1e\xce\
\x00\x00\x00\xd4\x00\x00\x00\x00\x00\x01\x00\x00\x15\x5a\
\x00\x00\x00\x86\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x62\
\x00\x00\x01\x56\x00\x00\x00\x00\x00\x01\x00\x00\x25\x20\
\x00\x00\x01\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x21\xf6\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x06\x06\
\x00\x00\x00\x6c\x00\x00\x00\x00\x00\x01\x00\x00\x09\x30\
\x00\x00\x01\x08\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xa6\
\x00\x00\x01\x70\x00\x00\x00\x00\x00\x01\x00\x00\x28\x4e\
\x00\x00\x00\xee\x00\x00\x00\x00\x00\x01\x00\x00\x18\x7a\
\x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x03\x50\
\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x01\x00\x00\x0f\xb6\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
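# Illustrative note, not part of the generated resource module: after
# qRegisterResourceData() has run, the embedded .qm catalogues are reachable
# through Qt resource paths built from the names above, e.g.
#   translator = QtCore.QTranslator()
#   translator.load(':/translations/i18n_fr.qm')
#   app.installTranslator(translator)
# (QtCore and a running application instance `app` are assumed to exist.)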
| 59.152727
| 96
| 0.725374
|
00fa955043ccf40e0e6edc6b528b09cf03de6ad4
| 369
|
py
|
Python
|
Q3a.py
|
Gerrydh/Algor
|
de9a63421b21876f61af49749e76e14dd57e233b
|
[
"Apache-2.0"
] | null | null | null |
Q3a.py
|
Gerrydh/Algor
|
de9a63421b21876f61af49749e76e14dd57e233b
|
[
"Apache-2.0"
] | null | null | null |
Q3a.py
|
Gerrydh/Algor
|
de9a63421b21876f61af49749e76e14dd57e233b
|
[
"Apache-2.0"
] | null | null | null |
def contains_duplicates(elements):
for i in range(0, len(elements)):
for j in range(0, len(elements)):
if i == j:
continue
if elements[i] == elements[j]:
return True
return False
test1 = [10, 0, 5, 3, -19, 5]
test2 = [0, 1, 0, -127, 346, 125]
print(contains_duplicates(test1))
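# A minimal O(n) alternative sketch using a set; the helper name below is
# illustrative and not part of the original exercise. Set membership tests are
# constant time on average, so a single pass is enough.
def contains_duplicates_fast(elements):
    seen = set()
    for value in elements:
        if value in seen:  # seen before -> duplicate found
            return True
        seen.add(value)
    return False

print(contains_duplicates_fast(test2))  # test2 contains 0 twice, so this prints True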
| 26.357143
| 43
| 0.509485
|
b73e37910bb084e49a4ba4499a04b41b74847fe3
| 3,306
|
py
|
Python
|
platypush/backend/http/app/routes/plugins/sound/__init__.py
|
RichardChiang/platypush
|
1777ebb0516118cdef20046a92caab496fa7c6cb
|
[
"MIT"
] | null | null | null |
platypush/backend/http/app/routes/plugins/sound/__init__.py
|
RichardChiang/platypush
|
1777ebb0516118cdef20046a92caab496fa7c6cb
|
[
"MIT"
] | null | null | null |
platypush/backend/http/app/routes/plugins/sound/__init__.py
|
RichardChiang/platypush
|
1777ebb0516118cdef20046a92caab496fa7c6cb
|
[
"MIT"
] | null | null | null |
import os
import tempfile
from flask import Response, Blueprint, request
from platypush.backend.http.app import template_folder
from platypush.backend.http.app.utils import authenticate, send_request
sound = Blueprint('sound', __name__, template_folder=template_folder)
# Declare routes list
__routes__ = [
sound,
]
# Generates the .wav file header for a given set of samples and specs
# noinspection PyRedundantParentheses
def gen_header(sample_rate, sample_width, channels):
datasize = int(2000 * 1e6) # Arbitrary data size for streaming
o = bytes("RIFF", ' ascii') # (4byte) Marks file as RIFF
o += (datasize + 36).to_bytes(4, 'little') # (4byte) File size in bytes
o += bytes("WAVE", 'ascii') # (4byte) File type
o += bytes("fmt ", 'ascii') # (4byte) Format Chunk Marker
o += (16).to_bytes(4, 'little') # (4byte) Length of above format data
o += (1).to_bytes(2, 'little') # (2byte) Format type (1 - PCM)
o += channels.to_bytes(2, 'little') # (2byte) Number of channels
o += sample_rate.to_bytes(4, 'little') # (4byte) Sample rate
o += (sample_rate * channels * sample_width // 8).to_bytes(4, 'little') # (4byte) Byte rate
o += (channels * sample_width // 8).to_bytes(2, 'little') # (2byte) Block align
o += sample_width.to_bytes(2, 'little') # (2byte) Bits per sample
o += bytes("data", 'ascii') # (4byte) Data Chunk Marker
o += datasize.to_bytes(4, 'little') # (4byte) Data size in bytes
return o
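# A minimal sanity-check sketch (illustrative only; _example_header is not used
# elsewhere in this module): a standard PCM WAV header built this way is
# 44 bytes long and starts with the RIFF/WAVE markers.
_example_header = gen_header(sample_rate=44100, sample_width=16, channels=1)
assert len(_example_header) == 44
assert _example_header[:4] == b'RIFF' and _example_header[8:12] == b'WAVE'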
def audio_feed(device, fifo, sample_rate, blocksize, latency, channels):
send_request(action='sound.stream_recording', device=device, sample_rate=sample_rate,
dtype='int16', fifo=fifo, blocksize=blocksize, latency=latency,
channels=channels)
try:
with open(fifo, 'rb') as f:
send_header = True
while True:
audio = f.read(blocksize)
if audio:
if send_header:
audio = gen_header(sample_rate=sample_rate, sample_width=16, channels=channels) + audio
send_header = False
yield audio
finally:
send_request(action='sound.stop_recording')
@sound.route('/sound/stream', methods=['GET'])
@authenticate()
def get_sound_feed():
device = request.args.get('device')
sample_rate = request.args.get('sample_rate', 44100)
blocksize = request.args.get('blocksize', 512)
latency = request.args.get('latency', 0)
channels = request.args.get('channels', 1)
fifo = request.args.get('fifo', os.path.join(tempfile.gettempdir(), 'inputstream'))
return Response(audio_feed(device=device, fifo=fifo, sample_rate=sample_rate,
blocksize=blocksize, latency=latency, channels=channels),
mimetype='audio/x-wav;codec=pcm')
# vim:sw=4:ts=4:et:
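# A minimal client-side sketch, guarded so it only runs when executed directly
# (assumptions: the HTTP backend listens on localhost:8008, authentication is
# disabled or handled elsewhere, and the `requests` package is installed); it
# saves the streamed WAV feed exposed by /sound/stream to a local file.
if __name__ == '__main__':
    import requests
    with requests.get('http://localhost:8008/sound/stream',
                      params={'device': 0}, stream=True) as resp:
        with open('capture.wav', 'wb') as out:
            for chunk in resp.iter_content(chunk_size=512):
                out.write(chunk)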
| 44.08
| 114
| 0.543255
|
31fa8f2ee2fc407e8a100ccee171cede4eb18399
| 9,929
|
py
|
Python
|
generate_maps_for_testing.py
|
abhiagni11/Deep-Learning-SDM
|
a0c30bf957543cac07f656b0edb23b52990af290
|
[
"MIT"
] | 2
|
2019-02-12T12:15:54.000Z
|
2019-03-10T02:37:37.000Z
|
generate_maps_for_testing.py
|
abhiagni11/Deep-Learning-SDM
|
a0c30bf957543cac07f656b0edb23b52990af290
|
[
"MIT"
] | null | null | null |
generate_maps_for_testing.py
|
abhiagni11/Deep-Learning-SDM
|
a0c30bf957543cac07f656b0edb23b52990af290
|
[
"MIT"
] | 2
|
2019-04-03T19:15:05.000Z
|
2019-11-05T00:46:52.000Z
|
#!/usr/bin/env python
'''
This file contains the procedure for map generation
Author: Manish Saroya
Contact: saroyam@oregonstate.edu
DARPA SubT Challenge
'''
import matplotlib.pyplot as plt
import numpy as np
import heapq
import random
class PriorityQueue:
def __init__(self):
self.elements = []
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
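# A minimal usage sketch of the priority queue above (illustrative only; _pq is
# not used elsewhere): items come back lowest priority first.
_pq = PriorityQueue()
_pq.put('far', 5)
_pq.put('near', 1)
assert _pq.get() == 'near' and _pq.get() == 'far'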
def heuristic(a, b):
(x1, y1) = a
(x2, y2) = b
return abs(x1 - x2) + abs(y1 - y2)
def a_star_search(grid, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for d in dirs_motion:
x, y = d(current[0], current[1])
# check for bounds
if 0 <= x < len(grid) and 0 <= y < len(grid[0]):
next = (x,y)
# making all travel as cost 1
new_cost = cost_so_far[current] + 1
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
return came_from, cost_so_far
def getPath(grid, start, goal):
start = tuple(start)
goal = tuple(goal)
came_from_, cost_so_far_ = a_star_search(grid, start, goal)
pointer = goal
path = []
path.append(pointer)
while pointer != start:
path.append(came_from_[pointer])
pointer = came_from_[pointer]
return path
# create random points of interest.
def createPOI(numPoints, dimension):
pts = []
while len(pts) < numPoints:
point = [np.random.randint(0, dimension[0]), np.random.randint(0, dimension[1])]
if point not in pts:
pts.append(point)
return pts
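# Illustrative check (not used by the generator below; _poi_demo is a
# hypothetical name): createPOI returns the requested number of distinct
# in-bounds cells.
_poi_demo = createPOI(4, [10, 10])
assert len(_poi_demo) == 4
assert all(0 <= p[0] < 10 and 0 <= p[1] < 10 for p in _poi_demo)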
def connectGrid(pts, grid):
for i in range(len(pts)):
for j in range(i+1, len(pts)):
path = getPath(np.zeros((len(grid), len(grid[0]))), pts[i], pts[j])
print("astarpath",path)
for k in path:
grid[k[0], k[1]] = 1
def sparseConnectGrid(pts, grid, near_entrance_point):
tree = []
tree.append(near_entrance_point)
#forbidden_points = {tuple(k): [] for k in pts}
for i in pts:
nearestPoints = nearestNeighbor(i, tree) #, forbidden_points[tuple(i)])
#forbidden_points[tuple(nearestPoint)].append(i)
for nearestPoint in nearestPoints:
if nearestPoint != i:
path = getPath(np.zeros((len(grid), len(grid[0]))), i, nearestPoint)
tree.append(i)
for k in path:
grid[k[0], k[1]] = 1
def nearestNeighbor(center, pts): #, forbidden):
distance = []
for i in pts:
#if i != center: #and (i not in forbidden):
distance.append(manhattanDist(i, center))
#else:
# distance.append(1000000)
nearestPoints = []
#nearestPoints.append(pts[np.argmin(distance)])
distance = np.array(distance)
print(distance)
indices = distance.argsort()[:2]
#print indices
nearestPoints.append(pts[indices[0]])
if random.uniform(0,1) > 0.8 and len(indices)>=2:
nearestPoints.append(pts[indices[1]])
return nearestPoints
def manhattanDist(p1,p2):
return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])
def connectEntrance(grid, entrance, pts):
distance = []
for i in pts:
distance.append(manhattanDist(i, entrance))
nearestPoint = pts[np.argmin(distance)]
print(nearestPoint)
if entrance != nearestPoint:
path = getPath(np.zeros((len(grid), len(grid[0]))), entrance, nearestPoint)
for i in path:
grid[i[0], i[1]] = 1
return nearestPoint
dirs_motion = [
lambda x, y: (x-1, y), # up
lambda x, y: (x+1, y), # down
lambda x, y: (x, y - 1), # left
lambda x, y: (x, y + 1), # right
]
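# Illustrative checks, not part of the generation flow (_demo_path is a
# hypothetical name): the four motion lambdas yield a cell's von Neumann
# neighbours, and A* on an empty 3x3 grid returns the goal-to-start path,
# here 5 cells for opposite corners (Manhattan distance 4 plus the start).
assert [d(2, 3) for d in dirs_motion] == [(1, 3), (3, 3), (2, 2), (2, 4)]
_demo_path = getPath(np.zeros((3, 3)), [0, 0], [2, 2])
assert len(_demo_path) == 5 and _demo_path[0] == (2, 2) and _demo_path[-1] == (0, 0)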
def getTiles(gridDimension, numPOI, i=0):
#board = np.zeros((gridDimension[0],gridDimension[1]))
path_viz = np.zeros((gridDimension[0], gridDimension[1]))
points = createPOI(numPOI, gridDimension)
print("points", points)
#connectGrid(points, path_viz)
#sparseConnectGrid(points, path_viz)
entrance_point = [0, int(gridDimension[1]/2)]
# Connecting Entrance to the nearest point of interest
near_entrance_point = connectEntrance(path_viz,entrance_point,points)
sparseConnectGrid(points, path_viz, near_entrance_point)
tiles = np.zeros((gridDimension[0], gridDimension[1]))
for x in range(len(path_viz)):
for y in range(len(path_viz[0])):
# get all the possible direction values.
dir_vector = []
for d in dirs_motion:
nx, ny = d(x, y)
if 0 <= nx < len(path_viz) and 0 <= ny < len(path_viz[0]):
dir_vector.append(path_viz[nx, ny])
else:
dir_vector.append(0)
# Connect with the entrance
if entrance_point[0] == x and entrance_point[1] == y:
print("equating entrance", entrance_point, x, y)
dir_vector[0] = 1
# check whether the current point needs a tile.
if path_viz[x,y] == 1:
if dir_vector[0] == 1 \
and dir_vector[1] == 1 \
and dir_vector[2] == 1 \
and dir_vector[3] == 1:
if [x,y] not in points:
tiles[x,y] = 111
else:
tiles[x,y] = 10 # 10 is the code for Plus connection.
elif dir_vector[0] == 1 \
and dir_vector[1] == 1 \
and dir_vector[2] == 1 \
and dir_vector[3] == 0:
tiles[x,y] = 21 # 21: T-junction open up/down/left (right closed).
elif dir_vector[0] == 1 \
and dir_vector[1] == 1 \
and dir_vector[2] == 0 \
and dir_vector[3] == 1:
tiles[x,y] = 22 # 22: T-junction open up/down/right (left closed).
elif dir_vector[0] == 1 \
and dir_vector[1] == 0 \
and dir_vector[2] == 1 \
and dir_vector[3] == 1:
tiles[x,y] = 23 # 23: T-junction open up/left/right (down closed).
elif dir_vector[0] == 0 \
and dir_vector[1] == 1 \
and dir_vector[2] == 1 \
and dir_vector[3] == 1:
tiles[x,y] = 24 # 24: T-junction open down/left/right (up closed).
elif sum(dir_vector) == 1:
print("sum", sum(dir_vector))
if dir_vector[0] == 1:
tiles[x,y] = 31 # 31-34: dead ends opening up/down/left/right respectively.
elif dir_vector[1] == 1:
tiles[x,y] = 32
elif dir_vector[2] == 1:
tiles[x,y] = 33
elif dir_vector[3] == 1:
tiles[x,y] = 34
elif dir_vector[0] == 1 \
and dir_vector[1] == 1 \
and dir_vector[2] == 0 \
and dir_vector[3] == 0:
tiles[x,y] = 11 # 11 is the code for straight connection along x axis.
elif dir_vector[0] == 0 \
and dir_vector[1] == 0 \
and dir_vector[2] == 1 \
and dir_vector[3] == 1:
tiles[x,y] = 12 # 12 is the code for straight connection along y axis, make yaw pi/2.
elif dir_vector[0] == 1 \
and dir_vector[1] == 0 \
and dir_vector[2] == 1 \
and dir_vector[3] == 0:
tiles[x,y] = 13 # 13 is the code for turn with yaw 0.
elif dir_vector[0] == 1 \
and dir_vector[1] == 0 \
and dir_vector[2] == 0 \
and dir_vector[3] == 1:
tiles[x,y] = 14 # 14 is the code for turn with yaw -pi/2.
elif dir_vector[0] == 0 \
and dir_vector[1] == 1 \
and dir_vector[2] == 1 \
and dir_vector[3] == 0:
tiles[x,y] = 15 # 15 is the code for turn with yaw pi/2.
elif dir_vector[0] == 0 \
and dir_vector[1] == 1 \
and dir_vector[2] == 0 \
and dir_vector[3] == 1:
tiles[x,y] = 16 # 16 is the code for turn with yaw pi.
print(path_viz)
print(tiles)
# plt.imshow(path_viz)
# plt.ylabel('x')
# plt.xlabel('y')
# plt.show()
return tiles, path_viz
GRID_SIZE = 24
NUMBER_OF_DATA_POINTS = 100
gridDimension = [GRID_SIZE, GRID_SIZE]
numPOI = 15
for i in range(NUMBER_OF_DATA_POINTS):
tiles, path_viz = getTiles(gridDimension,numPOI,i)
test = (np.logical_or.reduce((tiles==31,tiles==32,tiles==33,tiles==34))).astype(int)
artifacts = []
for x in range(len(test)):
for y in range(len(test[0])):
if test[x,y] > 0:
artifacts.append([x,y])
np.save('maps_{}/artifacts_'.format(GRID_SIZE)+str(i), np.array(artifacts))
np.save('maps_{}/tunnel_'.format(GRID_SIZE)+str(i), path_viz)
# print(t)
| 33.887372
| 106
| 0.509618
|
361702569f575abffc83ac5ffaccbb311bea382a
| 1,857
|
py
|
Python
|
pip/commands/completion.py
|
methane/pip
|
ee44e13716cb8dad3b52f0ab222eb2c7ce107e48
|
[
"MIT"
] | 1
|
2019-03-26T02:49:16.000Z
|
2019-03-26T02:49:16.000Z
|
pip/commands/completion.py
|
methane/pip
|
ee44e13716cb8dad3b52f0ab222eb2c7ce107e48
|
[
"MIT"
] | null | null | null |
pip/commands/completion.py
|
methane/pip
|
ee44e13716cb8dad3b52f0ab222eb2c7ce107e48
|
[
"MIT"
] | null | null | null |
import sys
from pip.basecommand import Command
BASE_COMPLETION = """
# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
"""
COMPLETION_SCRIPTS = {
'bash': """
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip
""", 'zsh': """
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip
"""}
class CompletionCommand(Command):
name = 'completion'
summary = 'A helper command to be used for command completion'
description = """
A helper command to be used for command completion."""
hidden = True
def __init__(self, *args, **kw):
super(CompletionCommand, self).__init__(*args, **kw)
self.parser.add_option(
'--bash', '-b',
action='store_const',
const='bash',
dest='shell',
help='Emit completion code for bash')
self.parser.add_option(
'--zsh', '-z',
action='store_const',
const='zsh',
dest='shell',
help='Emit completion code for zsh')
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--'+shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write('ERROR: You must pass %s\n' % ' or '.join(shell_options))
| 30.442623
| 86
| 0.581583
|
dd15a06d3511f5b87603fe30c94213bc2ed4328d
| 25,218
|
py
|
Python
|
iceprod/core/pilot.py
|
WIPACrepo/iceprod
|
83615da9b0e764bc2498ac588cc2e2b3f5277235
|
[
"MIT"
] | 2
|
2017-01-23T17:12:41.000Z
|
2019-01-14T13:38:17.000Z
|
iceprod/core/pilot.py
|
WIPACrepo/iceprod
|
83615da9b0e764bc2498ac588cc2e2b3f5277235
|
[
"MIT"
] | 242
|
2016-05-09T18:46:51.000Z
|
2022-03-31T22:02:29.000Z
|
iceprod/core/pilot.py
|
WIPACrepo/iceprod
|
83615da9b0e764bc2498ac588cc2e2b3f5277235
|
[
"MIT"
] | 2
|
2017-03-27T09:13:40.000Z
|
2019-01-27T10:55:30.000Z
|
"""Pilot functionality"""
from __future__ import absolute_import, division, print_function
import os
import sys
import math
import time
import logging
import tempfile
import shutil
import random
from functools import partial
from collections import namedtuple
from datetime import datetime, timedelta
from glob import glob
import signal
import traceback
import asyncio
import concurrent.futures
import iceprod
from iceprod.core.functions import gethostname
from iceprod.core import to_file, constants
from iceprod.core import exe_json
from iceprod.core.exe import Config
from iceprod.core.resources import Resources
from iceprod.core.dataclasses import Number, String
import iceprod.core.logger
logger = logging.getLogger('pilot')
try:
import psutil
except ImportError:
psutil = None
try:
from setproctitle import setproctitle
except ImportError:
def setproctitle(name):
pass
class Pilot:
"""
A pilot task runner.
The pilot allows multiple tasks to run in sequence or parallel.
It keeps track of resource usage, killing anything that goes over
requested amounts.
Use as an async context manager::
async with Pilot(*args) as p:
await p.run()
Args:
config (dict): the configuration dictionary
runner (callable): the task/config runner
pilot_id (str): the pilot id
rpc (:py:class:`iceprod.core.exe_json.ServerComms`): RPC to server
debug (bool): debug mode (default False)
run_timeout (int): how often to check if a task is running
backoff_delay (int): what starting delay to use for exponential backoff
restrict_site (bool): restrict running tasks to explicitly requiring this site
"""
def __init__(self, config, runner, pilot_id, rpc=None, debug=False,
run_timeout=180, backoff_delay=1, restrict_site=False):
self.config = config
self.runner = runner
self.pilot_id = pilot_id
self.hostname = gethostname()
self.rpc = rpc
self.debug = debug
self.run_timeout = run_timeout
self.backoff_delay = backoff_delay
self.resource_interval = 1.0 # seconds between resource measurements
self.query_params = {}
self.last_download = None
self.running = True
self.tasks = {}
try:
setproctitle('iceprod2_pilot({})'.format(pilot_id))
except Exception:
pass
logger.warning('pilot_id: %s', self.pilot_id)
logger.warning('hostname: %s', self.hostname)
# hint at resources for pilot
# don't pass them as raw, because that overrides condor
if 'resources' in config['options']:
for k in config['options']['resources']:
v = config['options']['resources'][k]
name = 'NUM_'+k.upper()
if k in ('cpu','gpu'):
name += 'S'
os.environ[name] = str(v)
self.resources = Resources(debug=self.debug)
if restrict_site:
if not self.resources.site:
logger.error('cannot restrict site, as the site is unknown')
else:
self.query_params['requirements.site'] = self.resources.site
self.start_time = time.time()
async def __aenter__(self):
# update pilot status
await self.rpc.update_pilot(self.pilot_id, tasks=[],
host=self.hostname, version=iceprod.__version__,
site=self.resources.site,
start_date=datetime.utcnow().isoformat(),
resources_available=self.resources.get_available(),
resources_claimed=self.resources.get_claimed())
loop = asyncio.get_event_loop()
# set up resource monitor
if psutil:
loop.create_task(self.resource_monitor())
else:
logger.warning('no psutil. not checking resource usage')
# set up signal handler
def handler(signum, frame):
logger.critical('termination signal received')
self.running = False
self.term_handler()
self.prev_signal = signal.signal(signal.SIGTERM, handler)
return self
async def __aexit__(self, exc_type, exc, tb):
try:
# make sure any child processes are dead
self.hard_kill()
if self.debug:
# append out, err, log
for dirs in glob('tmp*'):
for filename in (constants['stdout'], constants['stderr'],
constants['stdlog']):
if os.path.exists(os.path.join(dirs,filename)):
with open(filename,'a') as f:
print('', file=f)
print('----',dirs,'----', file=f)
with open(os.path.join(dirs,filename)) as f2:
print(f2.read(), file=f)
await self.rpc.delete_pilot(self.pilot_id)
except Exception:
logger.error('error in aexit', exc_info=True)
# restore previous signal handler
signal.signal(signal.SIGTERM, self.prev_signal)
def term_handler(self):
"""Handle a SIGTERM gracefully"""
logger.info('checking resources after SIGTERM')
start_time = time.time()
overages = self.resources.check_claims()
for task_id in list(self.tasks):
task = self.tasks[task_id]
try:
if task_id in overages:
reason = overages[task_id]
else:
reason = 'pilot SIGTERM'
# clean up task
used_resources = self.resources.get_final(task_id)
self.clean_task(task_id)
message = reason
message += '\n\npilot SIGTERM\npilot_id: {}'.format(self.pilot_id)
message += '\nhostname: {}'.format(self.hostname)
kwargs = {
'resources': used_resources,
'reason': reason,
'message': message,
}
if 'dataset_id' in task['config']['options']:
kwargs['dataset_id'] = task['config']['options']['dataset_id']
self.rpc.task_kill_sync(task_id, **kwargs)
except Exception:
pass
# stop the pilot
try:
self.rpc.delete_pilot_sync(self.pilot_id)
except Exception:
pass
sys.exit(1)
def hard_kill(self):
"""Forcefully kill any child processes"""
if psutil:
# kill children correctly
processes = psutil.Process().children(recursive=True)
processes.reverse()
for p in processes:
try:
p.kill()
except psutil.NoSuchProcess:
pass
except Exception:
logger.warning('error killing process',
exc_info=True)
for task in self.tasks.values():
try:
task['p'].kill()
except ProcessLookupError:
logger.warning('error killing process',
exc_info=True)
async def resource_monitor(self):
"""Monitor the tasks, killing any that go over resource limits"""
try:
sleep_time = self.resource_interval # check every X seconds
while self.running or self.tasks:
logger.debug('pilot monitor - checking resource usage')
start_time = time.time()
try:
overages = self.resources.check_claims()
for task_id in overages:
used_resources = self.resources.get_peak(task_id)
logger.warning('kill %r for going over resources: %r',
task_id, used_resources)
message = overages[task_id]
message += '\n\npilot_id: {}'.format(self.pilot_id)
message += '\nhostname: {}'.format(self.hostname)
kwargs = {
'resources': used_resources,
'reason': overages[task_id],
'message': message,
}
if 'dataset_id' in self.tasks[task_id]['config']['options']:
kwargs['dataset_id'] = self.tasks[task_id]['config']['options']['dataset_id']
self.clean_task(task_id)
await self.rpc.task_kill(task_id, **kwargs)
except Exception:
logger.error('error in resource_monitor', exc_info=True)
duration = time.time()-start_time
logger.debug('sleep_time %.2f, duration %.2f',sleep_time,duration)
if duration < sleep_time:
await asyncio.sleep(sleep_time-duration)
except Exception:
logger.error('pilot monitor died', exc_info=True)
logger.warning('pilot monitor exiting')
async def run(self):
"""Run the pilot"""
download_errors = max_download_errors = 5
iceprod_errors = max_iceprod_errors = 10
task_errors = max_task_errors = int(10**math.log10(10+self.resources.total['cpu']))
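# Note: 10**log10(10 + cpu) simplifies to 10 + cpu, so the task error budget
# above is roughly ten plus the pilot's total CPU count.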
logger.info('max_errors: %d, %d', max_download_errors, max_task_errors)
tasks_running = 0
async def backoff():
"""Backoff for rate limiting"""
delay = 60+self.backoff_delay*(1+random.random())
logger.info('backoff %d', delay)
await asyncio.sleep(delay)
self.backoff_delay *= 2
while self.running or self.tasks:
while self.running:
# retrieve new task(s)
if self.last_download and time.time()-self.last_download < 60:
logger.warning('last download attempt too recent, backing off')
await asyncio.sleep(time.time()-self.last_download+1)
break
self.last_download = time.time()
#if self.resources.total['gpu'] and not self.resources.available['gpu']:
# logger.info('gpu pilot with no gpus left - not queueing')
# break
try:
task_configs = await self.rpc.download_task(
self.config['options']['gridspec'],
resources=self.resources.get_available(),
site=self.resources.site,
query_params=self.query_params)
except Exception:
download_errors -= 1
if download_errors < 1:
self.running = False
logger.warning('errors over limit, draining')
logger.error('cannot download task. current error count is %d',
max_download_errors-download_errors, exc_info=True)
await backoff()
continue
logger.info('task configs: %r', task_configs)
if not task_configs:
logger.info('no task available')
if not self.tasks:
self.running = False
logger.warning('no task available, draining')
break
else:
# start up new task(s)
for task_config in task_configs:
try:
task_id = task_config['options']['task_id']
except Exception:
iceprod_errors -= 1
if iceprod_errors < 1:
self.running = False
logger.warning('errors over limit, draining')
logger.error('error getting task_id from config')
break
try:
if 'resources' not in task_config['options']:
task_config['options']['resources'] = None
task_resources = self.resources.claim(task_id, task_config['options']['resources'])
task_config['options']['resources'] = task_resources
except Exception:
iceprod_errors -= 1
if iceprod_errors < 1:
self.running = False
logger.warning('errors over limit, draining')
logger.warning('error claiming resources %s', task_id,
exc_info=True)
message = 'pilot_id: {}\nhostname: {}\n\n'.format(self.pilot_id, self.hostname)
message += traceback.format_exc()
kwargs = {
'reason': 'failed to claim resources',
'message': message,
}
if 'dataset_id' in task_config['options']:
kwargs['dataset_id'] = task_config['options']['dataset_id']
await self.rpc.task_kill(task_id, **kwargs)
break
try:
f = self.create_task(task_config)
task = await f.__anext__()
task['iter'] = f
self.tasks[task_id] = task
except Exception:
iceprod_errors -= 1
if iceprod_errors < 1:
self.running = False
logger.warning('errors over limit, draining')
logger.warning('error creating task %s', task_id,
exc_info=True)
message = 'pilot_id: {}\nhostname: {}\n\n'.format(self.pilot_id, self.hostname)
message += traceback.format_exc()
kwargs = {
'reason': 'failed to create task',
'message': message,
}
if 'dataset_id' in task_config['options']:
kwargs['dataset_id'] = task_config['options']['dataset_id']
await self.rpc.task_kill(task_id, **kwargs)
self.clean_task(task_id)
break
# update pilot status
await self.rpc.update_pilot(self.pilot_id, tasks=list(self.tasks),
resources_available=self.resources.get_available(),
resources_claimed=self.resources.get_claimed())
if (self.resources.available['cpu'] < 1
or self.resources.available['memory'] < 1
or (self.resources.total['gpu'] and not self.resources.available['gpu'])):
logger.info('no resources left, so wait for tasks to finish')
break
# otherwise, backoff
await backoff()
# wait until we can queue more tasks
while self.running or self.tasks:
logger.info('wait while tasks are running. timeout=%r',self.run_timeout)
start_time = time.time()
while self.tasks and time.time()-self.run_timeout < start_time:
done,pending = await asyncio.wait([task['p'].wait() for task in self.tasks.values()],
timeout=self.resource_interval,
return_when=concurrent.futures.FIRST_COMPLETED)
if done:
break
tasks_running = len(self.tasks)
for task_id in list(self.tasks):
# check if any processes have died
proc = self.tasks[task_id]['p']
clean = False
if proc.returncode is not None:
f = self.tasks[task_id]['iter']
try:
task = await f.__anext__()
except StopAsyncIteration:
logger.warning('task %s finished', task_id)
except Exception:
logger.warning('task %s failed', task_id,
exc_info=True)
task_errors -= 1
else:
logger.warning('task %s yielded again', task_id)
task['iter'] = f
self.tasks[task_id] = task
continue
# if we got here, the task is done
clean = True
# make sure the task is not running anymore
try:
await self.rpc.still_running(task_id)
except Exception:
pass
else:
logger.warning('task %s is still running, so killing it', task_id)
kwargs = {
'reason': 'task exited with return code {}'.format(proc.returncode),
'message': 'task exited with return code {}'.format(proc.returncode),
'resources': self.resources.get_final(task_id),
}
if 'dataset_id' in self.tasks[task_id]['config']['options']:
kwargs['dataset_id'] = self.tasks[task_id]['config']['options']['dataset_id']
await self.rpc.task_kill(task_id, **kwargs)
else:
# check if the DB has killed a task
try:
await self.rpc.still_running(task_id)
except Exception:
logger.warning('task %s killed by db', task_id)
kwargs = {
'reason': 'server kill',
'message': 'The server has marked the task as no longer running',
}
if 'dataset_id' in self.tasks[task_id]['config']['options']:
kwargs['dataset_id'] = self.tasks[task_id]['config']['options']['dataset_id']
await self.rpc.task_kill(task_id, **kwargs)
clean = True
if clean:
self.clean_task(task_id)
if task_errors < 1:
self.running = False
logger.warning('errors over limit, draining')
# update pilot status
if (not self.tasks) or len(self.tasks) < tasks_running:
logger.info('%d tasks removed', tasks_running-len(self.tasks))
tasks_running = len(self.tasks)
await self.rpc.update_pilot(self.pilot_id, tasks=list(self.tasks),
resources_available=self.resources.get_available(),
resources_claimed=self.resources.get_claimed())
if self.running:
break
elif (self.running and self.resources.available['cpu'] > 1
and self.resources.available['memory'] > 1
and (self.resources.available['gpu'] or not self.resources.total['gpu'])):
logger.info('resources available, so request a task')
break
# last update for pilot state
await self.rpc.update_pilot(self.pilot_id, tasks=[],
resources_available=self.resources.get_available(),
resources_claimed=self.resources.get_claimed())
if task_errors < 1:
logger.critical('too many errors when running tasks')
raise RuntimeError('too many errors')
else:
logger.warning('cleanly stopping pilot')
async def create_task(self, config):
"""
Create a new Task and start running it
Args:
config (dict): The task config
"""
task_id = config['options']['task_id']
# add grid-specific config
for k in self.config['options']:
if k == 'resources':
pass
elif k not in config['options']:
config['options'][k] = self.config['options'][k]
tmpdir = tempfile.mkdtemp(suffix='.{}'.format(task_id), dir=os.getcwd())
config['options']['subprocess_dir'] = tmpdir
# start the task
r = config['options']['resources']
async for proc in self.runner(config, resources=self.resources):
ps = psutil.Process(proc.pid) if psutil else None
self.resources.register_process(task_id, ps, tmpdir)
data = {
'p': proc,
'process': ps,
'tmpdir': tmpdir,
'config': config,
}
yield data
def clean_task(self, task_id):
"""Clean up a Task.
Delete remaining processes and the task temp dir. Release resources
back to the pilot.
Args:
task_id (str): the task_id
"""
logger.info('cleaning task %s', task_id)
if task_id in self.tasks:
task = self.tasks[task_id]
del self.tasks[task_id]
# kill process if still running
try:
if psutil:
# kill children correctly
try:
processes = task['process'].children(recursive=True)
except psutil.NoSuchProcess:
pass # process already died
else:
processes.reverse()
processes.append(task['process'])
for p in processes:
try:
p.terminate()
except psutil.NoSuchProcess:
pass
except Exception:
logger.warning('error terminating process',
exc_info=True)
def on_terminate(proc):
logger.info("process %r terminated with exit code %r",
proc, proc.returncode)
try:
gone, alive = psutil.wait_procs(processes, timeout=0.1,
callback=on_terminate)
for p in alive:
try:
p.kill()
except psutil.NoSuchProcess:
pass
except Exception:
logger.warning('error killing process',
exc_info=True)
except Exception:
logger.warning('failed to kill processes',
exc_info=True)
task['p'].kill()
except ProcessLookupError:
pass # process already died
except Exception:
logger.warning('error deleting process', exc_info=True)
# copy stdout/stderr
try:
os.rename(os.path.join(task['tmpdir'],constants['stderr']), constants['stderr'])
os.rename(os.path.join(task['tmpdir'],constants['stdout']), constants['stdout'])
except Exception:
logger.warning('error copying std[out,err] files', exc_info=True)
# clean tmpdir
try:
if not self.debug:
shutil.rmtree(task['tmpdir'])
except Exception:
logger.warning('error deleting tmpdir', exc_info=True)
# return resources to pilot
try:
self.resources.release(task_id)
except Exception:
logger.warning('error releasing resources', exc_info=True)
| 43.705373
| 111
| 0.486795
|
356d6fea8495f543f83188dadbb78f18302e720c
| 1,595
|
py
|
Python
|
src/pandas_profiling/utils/cache.py
|
abhicantdraw/pandas-profiling
|
a12ebb7a94b9371df94bf611237a389d99f8bc00
|
[
"MIT"
] | 8,107
|
2018-01-07T23:27:39.000Z
|
2022-02-22T12:57:11.000Z
|
src/pandas_profiling/utils/cache.py
|
abhicantdraw/pandas-profiling
|
a12ebb7a94b9371df94bf611237a389d99f8bc00
|
[
"MIT"
] | 771
|
2018-01-06T11:33:08.000Z
|
2022-02-21T11:16:02.000Z
|
src/pandas_profiling/utils/cache.py
|
abhicantdraw/pandas-profiling
|
a12ebb7a94b9371df94bf611237a389d99f8bc00
|
[
"MIT"
] | 1,308
|
2018-01-08T21:22:08.000Z
|
2022-02-21T04:10:21.000Z
|
"""Dataset cache utility functions"""
import zipfile
from pathlib import Path
import requests
from pandas_profiling.utils.paths import get_data_path
def cache_file(file_name: str, url: str) -> Path:
"""Check if file_name already is in the data path, otherwise download it from url.
Args:
file_name: the file name
url: the URL of the dataset
Returns:
The relative path to the dataset
"""
data_path = get_data_path()
data_path.mkdir(exist_ok=True)
file_path = data_path / file_name
# If not exists, download and create file
if not file_path.exists():
response = requests.get(url)
file_path.write_bytes(response.content)
return file_path
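# Illustrative usage sketch (the file name and URL are hypothetical, not part of
# pandas-profiling):
#   titanic_path = cache_file("titanic.csv", "https://example.com/titanic.csv")
#   # A second call with the same file_name returns the cached path without
#   # downloading again, because file_path.exists() is then True.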
def cache_zipped_file(file_name: str, url: str) -> Path:
"""Check if file_name already is in the data path, otherwise download it from url.
Args:
file_name: the file name
url: the URL of the dataset
Returns:
The relative path to the dataset
"""
data_path = get_data_path()
data_path.mkdir(exist_ok=True)
file_path = data_path / file_name
# If not exists, download and create file
if not file_path.exists():
response = requests.get(url)
if response.status_code != 200:
raise FileNotFoundError("Could not download resource")
tmp_path = data_path / "tmp.zip"
tmp_path.write_bytes(response.content)
with zipfile.ZipFile(tmp_path, "r") as zip_file:
zip_file.extract(file_path.name, data_path)
tmp_path.unlink()
return file_path
| 24.538462
| 86
| 0.668339
|
2d5699dbc784663a74de61b54b51a303b70a858f
| 12,612
|
py
|
Python
|
geoq/maps/views.py
|
mikiec84/geoq
|
84bf1349b4049c9ab41b88a36fd0d61837b509b5
|
[
"MIT"
] | null | null | null |
geoq/maps/views.py
|
mikiec84/geoq
|
84bf1349b4049c9ab41b88a36fd0d61837b509b5
|
[
"MIT"
] | 4
|
2021-06-10T23:08:33.000Z
|
2022-03-12T00:39:28.000Z
|
geoq/maps/views.py
|
mikiec84/geoq
|
84bf1349b4049c9ab41b88a36fd0d61837b509b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
import json
from django.contrib.auth.decorators import login_required
from django.contrib.gis.geos import GEOSGeometry
from django.core import serializers
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.views.generic import ListView, View, DeleteView
from django.views.decorators.http import require_http_methods
from forms import MapForm, MapInlineFormset, UploadKMZForm, UploadJSONForm
from geoq.core.models import AOI
from geoq.locations.models import Counties
from models import Feature, FeatureType, Map, Layer, MapLayerUserRememberedParams, MapLayer, GeoeventsSource
from kmz_handler import save_kmz_file
from json import load
import logging
logger = logging.getLogger(__name__)
class CreateFeatures(View):
"""
Reads GeoJSON from the POST request and creates a Feature for each GeoJSON feature.
"""
http_method_names = ['post']
def post(self, request, *args, **kwargs):
feature = None
tpi = request.META.get('HTTP_TEMP_POINT_ID', "none")
aoi = request.POST.get('aoi')
geometry = request.POST.get('geometry')
geojson = json.loads(geometry)
properties = geojson.get('properties')
aoi = AOI.objects.get(id=aoi)
job = getattr(aoi, 'job')
project = getattr(job, 'project')
template = properties.get('template') if properties else None
# TODO: handle exceptions
if template:
template = FeatureType.objects.get(id=template)
attrs = dict(aoi=aoi,
job=job,
project=project,
analyst=request.user,
template=template)
geometry = geojson.get('geometry')
geom_obj = GEOSGeometry(json.dumps(geometry))
attrs['the_geom'] = geom_obj
county_list = Counties.objects.filter(poly__contains=geom_obj.centroid.wkt)
county = None
if len(county_list):
county = str(county_list[0].name)
try:
feature = Feature(**attrs)
feature.full_clean()
if not feature.properties:
feature.properties = {}
if county:
feature.properties['county'] = county
feature.save()
except ValidationError as e:
response = HttpResponse(content=json.dumps(dict(errors=e.messages)), mimetype="application/json", status=400)
response['Temp-Point-Id'] = tpi
return response
# This feels a bit ugly but it does get the GeoJSON into the response
feature_json = serializers.serialize('json', [feature,])
feature_list = json.loads(feature_json)
feature_list[0]['geojson'] = feature.geoJSON(True)
response = HttpResponse(json.dumps(feature_list), mimetype="application/json")
response['Temp-Point-Id'] = tpi
return response
class EditFeatures(View):
"""
Reads feature info from the POST request and updates the associated Feature object.
"""
http_method_names = ['post']
def post(self, request, *args, **kwargs):
geometry = request.POST.get('geometry')
geojson = json.loads(geometry)
properties = geojson.get('properties')
try:
feature = Feature.objects.get(pk=properties.get('id'))
except ObjectDoesNotExist:
raise Http404
geometry = geojson.get('geometry')
feature.the_geom = GEOSGeometry(json.dumps(geometry))
template = properties.get('template') if properties else None
# TODO: handle exceptions
if template:
feature.template = FeatureType.objects.get(id=template)
try:
feature.full_clean()
feature.save()
except ValidationError as e:
return HttpResponse(content=json.dumps(dict(errors=e.messages)), mimetype="application/json", status=400)
return HttpResponse("{}", mimetype="application/json")
@login_required
@require_http_methods(["POST"])
def update_user_maplayer_param(request, *args, **kwargs):
user = request.user
try:
json_stuff = json.loads(request.body)
except ValueError:
return HttpResponse("{\"status\":\"Bad Request\"}", mimetype="application/json", status=400)
mlq = MapLayer.objects.filter(id=json_stuff['maplayer'])
if not mlq.exists():
return HttpResponse("{\"status:\":\"Bad Request\", \"reason\":\"MapLayer does not exist\"}", status=400)
else:
ml = mlq.get()
mlurpq = MapLayerUserRememberedParams.objects.filter(maplayer=ml, user=user)
if mlurpq.exists():
mlurp = mlurpq.get()
else:
mlurp = MapLayerUserRememberedParams(maplayer=ml, user=user, values={})
mlurp.values[json_stuff['param']] = json_stuff['newValue']
mlurp.save()
return HttpResponse(json.dumps(mlurp.values), mimetype="application/json", status=200)
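# Illustrative request body for update_user_maplayer_param (the values are
# hypothetical): {"maplayer": 3, "param": "opacity", "newValue": 0.5}.
# The view looks up the MapLayer, then stores newValue under values[param] on the
# user's MapLayerUserRememberedParams row and returns the updated values dict.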
def feature_delete(request, pk):
try:
feature = Feature.objects.get(pk=pk)
feature.delete()
except ObjectDoesNotExist:
raise Http404
return HttpResponse( content=pk, status=200 )
@login_required
def create_update_map(request, job_id, map_id):
if map_id:
map_obj = Map.objects.get(pk=map_id)
else:
map_obj = None
if request.method == 'POST':
form = MapForm(request.POST, prefix='map', instance=map_obj)
maplayers_formset = MapInlineFormset(request.POST, prefix='layers', instance=map_obj)
if form.is_valid() and maplayers_formset.is_valid():
form.save()
maplayers_formset.instance = form.instance
maplayers_formset.save()
return HttpResponseRedirect(reverse('job-detail', kwargs = {'pk': job_id}))
else:
form = MapForm(prefix='map', instance=map_obj)
maplayers_formset = MapInlineFormset(prefix='layers', instance=map_obj)
# form = [f for f in form if f.name not in ['zoom', 'projection', 'center_x', 'center_y']]
return render_to_response('core/generic_form.html', {
'form': form,
'layer_formset': maplayers_formset,
'custom_form': 'core/map_create.html',
'object': map_obj,
}, context_instance=RequestContext(request))
class MapListView(ListView):
model = Map
def get_context_data(self, **kwargs):
context = super(MapListView, self).get_context_data(**kwargs)
context['admin'] = self.request.user.has_perm('maps.add_map')
return context
class MapDelete(DeleteView):
model = Map
template_name = "core/generic_confirm_delete.html"
def get_success_url(self):
return reverse('map-list')
class FeatureTypeListView(ListView):
model = FeatureType
def get_context_data(self, **kwargs):
context = super(FeatureTypeListView, self).get_context_data(**kwargs)
context['admin'] = self.request.user.has_perm('maps.add_featuretype')
return context
class FeatureTypeDelete(DeleteView):
model = FeatureType
template_name = "core/generic_confirm_delete.html"
def get_success_url(self):
# TODO: Add a signal to the context to
# tell the user that it was successful.
return reverse('feature-type-list')
class LayerListView(ListView):
model = Layer
def get_context_data(self, **kwargs):
context = super(LayerListView, self).get_context_data(**kwargs)
context['admin'] = self.request.user.has_perm('maps.add_layer')
return context
class LayerImport(ListView):
model = Layer
template_name = "maps/layer_import.html"
def get_context_data(self, **kwargs):
context = super(LayerImport, self).get_context_data(**kwargs)
context['geoevents_sources'] = GeoeventsSource.objects.all()
return context
def post(self, request, *args, **kwargs):
layers = request.POST.getlist('layer')
for lay in layers:
layer = json.loads(lay)
# see if it's already in here. assume 'url' and 'layer' attributes make it unique
l = Layer.objects.filter(url=layer['url'], layer=layer['layer'])
if not l:
# add the layer
new_layer = Layer()
for key, value in layer.iteritems():
# if key == 'layer_params':
# # TODO: need to pass json object here
# pass
# else:
setattr(new_layer, key, value)
new_layer.save()
return HttpResponseRedirect(reverse('layer-list'))
class LayerDelete(DeleteView):
model = Layer
template_name = "core/generic_confirm_delete.html"
def get_success_url(self):
return reverse('layer-list')
class KMZLayerImport(ListView):
model = Layer
template_name = "maps/kmz_upload.html"
def get_context_data(self, **kwargs):
context = super(KMZLayerImport, self).get_context_data(**kwargs)
return context
def post(self, request, *args, **kwargs):
form = UploadKMZForm(request.POST, request.FILES)
if form.is_valid():
localdir = save_kmz_file(request.FILES['kmzfile'])
uri = request.build_absolute_uri(localdir)
if localdir != None:
layer = Layer.objects.create(name = request.POST['title'],type="KML",url=uri,layer="",styles="",description="")
return HttpResponseRedirect(reverse('layer-list'))
class JSONLayerImport(ListView):
model = Layer
template_name = "maps/json_upload.html"
def get_context_data(self, **kwargs):
context = super(JSONLayerImport, self).get_context_data(**kwargs)
return context
def post(self, request, *args, **kwargs):
form = UploadJSONForm(request.POST, request.FILES)
try:
dataFromFile = load(request.FILES["jsonfile"])
except ValueError as e:
# This is a bad JSON file. We should never get to this point, but it is the last layer of defense.
return HttpResponseRedirect(reverse('layer-list'))
#Check to make sure that we actually have data
if dataFromFile != None:
layerName = request.POST['title']
if not layerName.strip():
layerName = dataFromFile["name"]
# Due to naming mismatches between the actual DB field names and the layer export function in maps/models.py, this has to be done in one long (not pretty) call.
layer = Layer.objects.create(id=dataFromFile["id"], name = layerName, image_format=dataFromFile["format"], type=dataFromFile["type"],
url=dataFromFile["url"], additional_domains=dataFromFile["subdomains"], layer=dataFromFile["layer"], transparent=dataFromFile["transparent"],
layer_params=dataFromFile["layerParams"], dynamic_params=dataFromFile["dynamicParams"], refreshrate=dataFromFile["refreshrate"],
token=dataFromFile["token"], attribution=dataFromFile["attribution"], spatial_reference=dataFromFile["spatialReference"],
layer_parsing_function=dataFromFile["layerParsingFunction"], enable_identify=dataFromFile["enableIdentify"],
root_field=dataFromFile["rootField"], info_format=dataFromFile["infoFormat"], fields_to_show=dataFromFile["fieldsToShow"],
description=dataFromFile["description"], downloadableLink=dataFromFile["downloadableLink"], layer_info_link=dataFromFile["layer_info_link"],
styles=dataFromFile["styles"])
return HttpResponseRedirect(reverse('layer-list'))
class JSONLayerExport(ListView):
model = Layer
def get(self, request, *args, **kwargs):
name = self.kwargs.get('pk').replace("%20", " ");
layer = Layer.objects.get(name__iexact = name)
layerJson = json.dumps(layer.layer_json(), indent=2);
return HttpResponse(layerJson, mimetype="application/json", status=200)
| 36.556522
| 183
| 0.646131
|
c407e6eac3604e3fbe1a387c14190203c23ef54e
| 385
|
py
|
Python
|
datahub/search/test/search_support/simplemodel/serializers.py
|
uktrade/data-hub-api
|
c698cba533ff002293b821d01916f6334549f778
|
[
"MIT"
] | 6
|
2019-12-02T16:11:24.000Z
|
2022-03-18T10:02:02.000Z
|
datahub/search/test/search_support/simplemodel/serializers.py
|
uktrade/data-hub-api
|
c698cba533ff002293b821d01916f6334549f778
|
[
"MIT"
] | 1,696
|
2019-10-31T14:08:37.000Z
|
2022-03-29T12:35:57.000Z
|
datahub/search/test/search_support/simplemodel/serializers.py
|
uktrade/data-hub-api
|
c698cba533ff002293b821d01916f6334549f778
|
[
"MIT"
] | 9
|
2019-11-22T12:42:03.000Z
|
2021-09-03T14:25:05.000Z
|
from rest_framework import serializers
from datahub.search.serializers import EntitySearchQuerySerializer
class SearchSimpleModelSerializer(EntitySearchQuerySerializer):
"""Serialiser used to validate simple model search POST bodies."""
name = serializers.CharField(required=False)
country = serializers.CharField(required=False)
SORT_BY_FIELDS = ('date', 'name')
| 29.615385
| 70
| 0.792208
|
56407d7f71f5fc360aa3d3311b76d5eca89f51e9
| 199
|
py
|
Python
|
python/qipy/test/projects/big_project/spam.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qipy/test/projects/big_project/spam.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qipy/test/projects/big_project/spam.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
def spam():
print "spam"
| 24.875
| 72
| 0.723618
|
9b71eb4c0d83623a28af7a3d1719c13b995a78b8
| 128
|
py
|
Python
|
compsocsite/groups/apps.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | 8
|
2017-03-07T19:46:51.000Z
|
2021-06-01T01:41:37.000Z
|
compsocsite/groups/apps.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | null | null | null |
compsocsite/groups/apps.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | 9
|
2016-06-09T03:36:20.000Z
|
2019-09-11T20:56:23.000Z
|
from __future__ import unicode_literals
from django.apps import AppConfig
class GroupsConfig(AppConfig):
name = 'groups'
| 16
| 39
| 0.789063
|
edcf4a668697d20542a3285fbd2024ae6da637fa
| 24,766
|
py
|
Python
|
python/tvm/testing.py
|
h77h7/tvm-04.26
|
1bd8e6b921f392ae29b7672159326d94d40d6922
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 32
|
2021-12-09T07:55:32.000Z
|
2022-03-29T12:20:52.000Z
|
python/tvm/testing.py
|
h77h7/tvm-04.26
|
1bd8e6b921f392ae29b7672159326d94d40d6922
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1
|
2019-10-22T21:09:49.000Z
|
2019-10-22T21:09:49.000Z
|
python/tvm/testing.py
|
h77h7/tvm-04.26
|
1bd8e6b921f392ae29b7672159326d94d40d6922
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 2
|
2019-10-22T08:48:21.000Z
|
2022-02-19T02:17:50.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unnecessary-comprehension
""" TVM testing utilities
Testing Markers
***************
We use pytest markers to specify the requirements of test functions. Currently
there is a single distinction that matters for our testing environment: does
the test require a gpu. For tests that require just a gpu or just a cpu, we
have the decorator :py:func:`requires_gpu` that enables the test when a gpu is
available. To avoid running tests that don't require a gpu on gpu nodes, this
decorator also sets the pytest marker `gpu` so we can select the gpu subset
of tests (using `pytest -m gpu`).
Unfortunately, many tests are written like this:
.. python::
def test_something():
for target in all_targets():
do_something()
The test uses both gpu and cpu targets, so the test needs to be run on both cpu
and gpu nodes. But we still want to only run the cpu targets on the cpu testing
node. The solution is to mark these tests with the gpu marker so they will be
run on the gpu nodes. But we also modify all_targets (renamed to
enabled_targets) so that it only returns gpu targets on gpu nodes and cpu
targets on cpu nodes (using an environment variable).
Instead of using the all_targets function, future tests that would like to
test against a variety of targets should use the
:py:func:`tvm.testing.parametrize_targets` functionality. This allows us
greater control over which targets are run on which testing nodes.
If in the future we want to add a new type of testing node (for example
fpgas), we need to add a new marker in `tests/python/pytest.ini` and a new
function in this module. Then targets using this node should be added to the
`TVM_TEST_TARGETS` environment variable in the CI.
"""
import logging
import os
import sys
import time
import pytest
import numpy as np
import tvm
import tvm.arith
import tvm.tir
import tvm.te
import tvm._ffi
from tvm.contrib import nvcc
def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):
"""Version of np.testing.assert_allclose with `atol` and `rtol` fields set
in reasonable defaults.
Arguments `actual` and `desired` are not interchangable, since the function
compares the `abs(actual-desired)` with `atol+rtol*abs(desired)`. Since we
often allow `desired` to be close to zero, we generally want non-zero `atol`.
"""
actual = np.asanyarray(actual)
desired = np.asanyarray(desired)
np.testing.assert_allclose(actual.shape, desired.shape)
np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, verbose=True)
def check_numerical_grads(
function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1
):
"""A helper function that checks that numerical gradients of a function are
equal to gradients computed in some different way (analytical gradients).
Numerical gradients are computed using finite difference approximation. To
reduce the number of function evaluations, the number of points used is
gradually increased if the error value is too high (up to 5 points).
Parameters
----------
function
A function that takes inputs either as positional or as keyword
arguments (either `function(*input_values)` or `function(**input_values)`
should be correct) and returns a scalar result. Should accept numpy
ndarrays.
input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
A list of values or a dict assigning values to variables. Represents the
point at which gradients should be computed.
grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
Gradients computed using a different method.
function_value : float, optional
Should be equal to `function(**input_values)`.
delta : float, optional
A small number used for numerical computation of partial derivatives.
The default 1e-3 is a good choice for float32.
atol : float, optional
Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a
gradient.
rtol : float, optional
Relative tolerance.
"""
# If input_values is a list then function accepts positional arguments
# In this case transform it to a function taking kwargs of the form {"0": ..., "1": ...}
if not isinstance(input_values, dict):
input_len = len(input_values)
input_values = {str(idx): val for idx, val in enumerate(input_values)}
def _function(_input_len=input_len, _orig_function=function, **kwargs):
return _orig_function(*(kwargs[str(i)] for i in range(input_len)))
function = _function
grad_values = {str(idx): val for idx, val in enumerate(grad_values)}
if function_value is None:
function_value = function(**input_values)
# a helper to modify j-th element of val by a_delta
def modify(val, j, a_delta):
val = val.copy()
val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta
return val
# numerically compute a partial derivative with respect to j-th element of the var `name`
def derivative(x_name, j, a_delta):
modified_values = {
n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()
}
return (function(**modified_values) - function_value) / a_delta
def compare_derivative(j, n_der, grad):
der = grad.reshape(-1)[j]
return np.abs(n_der - der) < atol + rtol * np.abs(n_der)
for x_name, grad in grad_values.items():
if grad.shape != input_values[x_name].shape:
raise AssertionError(
"Gradient wrt '{}' has unexpected shape {}, expected {} ".format(
x_name, grad.shape, input_values[x_name].shape
)
)
ngrad = np.zeros_like(grad)
wrong_positions = []
# compute partial derivatives for each position in this variable
for j in range(np.prod(grad.shape)):
# forward difference approximation
nder = derivative(x_name, j, delta)
# if the derivative is not equal to the analytical one, try to use more
# precise and expensive methods
if not compare_derivative(j, nder, grad):
# central difference approximation
nder = (derivative(x_name, j, -delta) + nder) / 2
if not compare_derivative(j, nder, grad):
# central difference approximation using h = delta/2
cnder2 = (
derivative(x_name, j, delta / 2) + derivative(x_name, j, -delta / 2)
) / 2
# five-point derivative
nder = (4 * cnder2 - nder) / 3
# if the derivatives still don't match, add this position to the
# list of wrong positions
if not compare_derivative(j, nder, grad):
wrong_positions.append(np.unravel_index(j, grad.shape))
ngrad.reshape(-1)[j] = nder
wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))
dist = np.sqrt(np.sum((ngrad - grad) ** 2))
grad_norm = np.sqrt(np.sum(ngrad ** 2))
if not (np.isfinite(dist) and np.isfinite(grad_norm)):
raise ValueError(
"NaN or infinity detected during numerical gradient checking wrt '{}'\n"
"analytical grad = {}\n numerical grad = {}\n".format(x_name, grad, ngrad)
)
# we multiply atol by this number to make it more universal for different sizes
sqrt_n = np.sqrt(float(np.prod(grad.shape)))
if dist > atol * sqrt_n + rtol * grad_norm:
raise AssertionError(
"Analytical and numerical grads wrt '{}' differ too much\n"
"analytical grad = {}\n numerical grad = {}\n"
"{}% of elements differ, first 10 of wrong positions: {}\n"
"distance > atol*sqrt(n) + rtol*grad_norm\n"
"distance {} > {}*{} + {}*{}".format(
x_name,
grad,
ngrad,
wrong_percentage,
wrong_positions[:10],
dist,
atol,
sqrt_n,
rtol,
grad_norm,
)
)
max_diff = np.max(np.abs(ngrad - grad))
avg_diff = np.mean(np.abs(ngrad - grad))
logging.info(
"Numerical grad test wrt '%s' of shape %s passes, "
"dist = %f, max_diff = %f, avg_diff = %f",
x_name,
grad.shape,
dist,
max_diff,
avg_diff,
)
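# Illustrative usage sketch (not part of the original module): verify the
# analytical gradient of f(x) = sum(x**2), which is 2*x, against the numerical
# estimate computed by check_numerical_grads.
#   x = np.random.rand(3, 4)
#   check_numerical_grads(lambda x: np.sum(x ** 2), {"x": x}, {"x": 2 * x})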
def assert_prim_expr_equal(lhs, rhs):
"""Assert lhs and rhs equals to each iother.
Parameters
----------
lhs : tvm.tir.PrimExpr
The left operand.
rhs : tvm.tir.PrimExpr
The right operand.
"""
ana = tvm.arith.Analyzer()
res = ana.simplify(lhs - rhs)
equal = isinstance(res, tvm.tir.IntImm) and res.value == 0
if not equal:
raise ValueError("{} and {} are not equal".format(lhs, rhs))
def check_bool_expr_is_true(bool_expr, vranges, cond=None):
"""Check that bool_expr holds given the condition cond
for every value of free variables from vranges.
For example, 2x > 4y solves to x > 2y given x in (0, 10) and y in (0, 10);
here bool_expr is x > 2y, vranges is {x: (0, 10), y: (0, 10)}, and cond is 2x > 4y.
We create iterations to check:
for x in range(10):
for y in range(10):
assert !(2x > 4y) || (x > 2y)
Parameters
----------
bool_expr : tvm.ir.PrimExpr
Boolean expression to check
vranges: Dict[tvm.tir.expr.Var, tvm.ir.Range]
Free variables and their ranges
cond: tvm.ir.PrimExpr
extra conditions needs to be satisfied.
"""
if cond is not None:
bool_expr = tvm.te.any(tvm.tir.Not(cond), bool_expr)
def _run_expr(expr, vranges):
"""Evaluate expr for every value of free variables
given by vranges and return the tensor of results.
"""
def _compute_body(*us):
vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
return tvm.tir.stmt_functor.substitute(expr, vmap)
A = tvm.te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
args = [tvm.nd.empty(A.shape, A.dtype)]
sch = tvm.te.create_schedule(A.op)
mod = tvm.build(sch, [A])
mod(*args)
return args[0].asnumpy()
res = _run_expr(bool_expr, vranges)
if not np.all(res):
indices = list(np.argwhere(res == 0)[0])
counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]
counterex = sorted(counterex, key=lambda x: x[0])
counterex = ", ".join([v + " = " + str(i) for v, i in counterex])
ana = tvm.arith.Analyzer()
raise AssertionError(
"Expression {}\nis not true on {}\n"
"Counterexample: {}".format(ana.simplify(bool_expr), vranges, counterex)
)
def check_int_constraints_trans_consistency(constraints_trans, vranges=None):
"""Check IntConstraintsTransform is a bijective transformation.
Parameters
----------
constraints_trans : arith.IntConstraintsTransform
Integer constraints transformation
vranges: Dict[tvm.tir.Var, tvm.ir.Range]
Free variables and their ranges
"""
if vranges is None:
vranges = {}
def _check_forward(constraints1, constraints2, varmap, backvarmap):
ana = tvm.arith.Analyzer()
all_vranges = vranges.copy()
all_vranges.update({v: r for v, r in constraints1.ranges.items()})
# Check that the transformation is injective
cond_on_vars = tvm.tir.const(1, "bool")
for v in constraints1.variables:
if v in varmap:
# variable mapping is consistent
v_back = ana.simplify(tvm.tir.stmt_functor.substitute(varmap[v], backvarmap))
cond_on_vars = tvm.te.all(cond_on_vars, v == v_back)
# Also we have to check that the new relations are true when old relations are true
cond_subst = tvm.tir.stmt_functor.substitute(
tvm.te.all(tvm.tir.const(1, "bool"), *constraints2.relations), backvarmap
)
# We have to include relations from vranges too
for v in constraints2.variables:
if v in constraints2.ranges:
r = constraints2.ranges[v]
range_cond = tvm.te.all(v >= r.min, v < r.min + r.extent)
range_cond = tvm.tir.stmt_functor.substitute(range_cond, backvarmap)
cond_subst = tvm.te.all(cond_subst, range_cond)
cond_subst = ana.simplify(cond_subst)
check_bool_expr_is_true(
tvm.te.all(cond_subst, cond_on_vars),
all_vranges,
cond=tvm.te.all(tvm.tir.const(1, "bool"), *constraints1.relations),
)
_check_forward(
constraints_trans.src,
constraints_trans.dst,
constraints_trans.src_to_dst,
constraints_trans.dst_to_src,
)
_check_forward(
constraints_trans.dst,
constraints_trans.src,
constraints_trans.dst_to_src,
constraints_trans.src_to_dst,
)
def _get_targets():
target_str = os.environ.get("TVM_TEST_TARGETS", "")
if len(target_str) == 0:
target_str = DEFAULT_TEST_TARGETS
targets = set()
for dev in target_str.split(";"):
if len(dev) == 0:
continue
target_kind = dev.split()[0]
if tvm.runtime.enabled(target_kind) and tvm.device(target_kind, 0).exist:
targets.add(dev)
if len(targets) == 0:
logging.warning(
"None of the following targets are supported by this build of TVM: %s."
" Try setting TVM_TEST_TARGETS to a supported target. Defaulting to llvm.",
target_str,
)
return {"llvm"}
return targets
DEFAULT_TEST_TARGETS = (
"llvm;cuda;opencl;metal;rocm;vulkan;nvptx;"
"llvm -device=arm_cpu;opencl -device=mali,aocl_sw_emu"
)
def device_enabled(target):
"""Check if a target should be used when testing.
It is recommended that you use :py:func:`tvm.testing.parametrize_targets`
instead of manually checking if a target is enabled.
This allows the user to control which devices they are testing against. In
tests, this should be used to check if a device should be used when said
device is an optional part of the test.
Parameters
----------
target : str
Target string to check against
Returns
-------
bool
Whether or not the device associated with this target is enabled.
Example
-------
>>> @tvm.testing.uses_gpu
>>> def test_mytest():
>>> for target in ["cuda", "llvm"]:
>>> if device_enabled(target):
>>> test_body...
Here, `test_body` will only be reached with `target="cuda"` on gpu test
nodes and `target="llvm"` on cpu test nodes.
"""
assert isinstance(target, str), "device_enabled requires a target as a string"
target_kind = target.split(" ")[
0
] # only check if device name is found, sometime there are extra flags
return any([target_kind in test_target for test_target in _get_targets()])
def enabled_targets():
"""Get all enabled targets with associated contexts.
In most cases, you should use :py:func:`tvm.testing.parametrize_targets` instead of
this function.
In this context, enabled means that TVM was built with support for this
target and the target name appears in the TVM_TEST_TARGETS environment
variable. If TVM_TEST_TARGETS is not set, it defaults to variable
DEFAULT_TEST_TARGETS in this module.
If you use this function in a test, you **must** decorate the test with
:py:func:`tvm.testing.uses_gpu` (otherwise it will never be run on the gpu).
Returns
-------
targets: list
A list of pairs of all enabled devices and the associated context
"""
return [(tgt, tvm.device(tgt)) for tgt in _get_targets()]
def _compose(args, decs):
"""Helper to apply multiple markers"""
if len(args) > 0:
f = args[0]
for d in reversed(decs):
f = d(f)
return f
return decs
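# Note on _compose: when a marker helper such as requires_cuda is used bare
# (@requires_cuda), args holds the decorated function and the marks are applied
# immediately; when it is called (requires_cuda()), args is empty and the list of
# pytest marks is returned so it can be reused, e.g. as marks= in pytest.param.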
def uses_gpu(*args):
"""Mark to differentiate tests that use the GPU is some capacity.
These tests will be run on CPU-only test nodes and on test nodes with GPUS.
To mark a test that must have a GPU present to run, use
:py:func:`tvm.testing.requires_gpu`.
Parameters
----------
f : function
Function to mark
"""
_uses_gpu = [pytest.mark.gpu]
return _compose(args, _uses_gpu)
def requires_gpu(*args):
"""Mark a test as requiring a GPU to run.
Tests with this mark will not be run unless a gpu is present.
Parameters
----------
f : function
Function to mark
"""
_requires_gpu = [
pytest.mark.skipif(not tvm.gpu().exist, reason="No GPU present"),
*uses_gpu(),
]
return _compose(args, _requires_gpu)
def requires_cuda(*args):
"""Mark a test as requiring the CUDA runtime.
This also marks the test as requiring a gpu.
Parameters
----------
f : function
Function to mark
"""
_requires_cuda = [
pytest.mark.cuda,
pytest.mark.skipif(not device_enabled("cuda"), reason="CUDA support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_cuda)
def requires_cudagraph(*args):
"""Mark a test as requiring the CUDA Graph Feature
This also marks the test as requiring cuda
Parameters
----------
f : function
Function to mark
"""
_requires_cudagraph = [
pytest.mark.skipif(
not nvcc.have_cudagraph(), reason="CUDA Graph is not supported in this environment"
),
*requires_cuda(),
]
return _compose(args, _requires_cudagraph)
def requires_opencl(*args):
"""Mark a test as requiring the OpenCL runtime.
This also marks the test as requiring a gpu.
Parameters
----------
f : function
Function to mark
"""
_requires_opencl = [
pytest.mark.opencl,
pytest.mark.skipif(not device_enabled("opencl"), reason="OpenCL support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_opencl)
def requires_rocm(*args):
"""Mark a test as requiring the rocm runtime.
This also marks the test as requiring a gpu.
Parameters
----------
f : function
Function to mark
"""
_requires_rocm = [
pytest.mark.rocm,
pytest.mark.skipif(not device_enabled("rocm"), reason="rocm support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_rocm)
def requires_metal(*args):
"""Mark a test as requiring the metal runtime.
This also marks the test as requiring a gpu.
Parameters
----------
f : function
Function to mark
"""
_requires_metal = [
pytest.mark.metal,
pytest.mark.skipif(not device_enabled("metal"), reason="metal support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_metal)
def requires_vulkan(*args):
"""Mark a test as requiring the vulkan runtime.
This also marks the test as requiring a gpu.
Parameters
----------
f : function
Function to mark
"""
_requires_vulkan = [
pytest.mark.vulkan,
pytest.mark.skipif(not device_enabled("vulkan"), reason="vulkan support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_vulkan)
def requires_tensorcore(*args):
"""Mark a test as requiring a tensorcore to run.
Tests with this mark will not be run unless a tensorcore is present.
Parameters
----------
f : function
Function to mark
"""
_requires_tensorcore = [
pytest.mark.tensorcore,
pytest.mark.skipif(
not tvm.gpu().exist or not nvcc.have_tensorcore(tvm.gpu(0).compute_version),
reason="No tensorcore present",
),
*requires_gpu(),
]
return _compose(args, _requires_tensorcore)
def requires_llvm(*args):
"""Mark a test as requiring llvm to run.
Parameters
----------
f : function
Function to mark
"""
_requires_llvm = [
pytest.mark.llvm,
pytest.mark.skipif(not device_enabled("llvm"), reason="LLVM support not enabled"),
]
return _compose(args, _requires_llvm)
def requires_micro(*args):
"""Mark a test as requiring microTVM to run.
Parameters
----------
f : function
Function to mark
"""
_requires_micro = [
pytest.mark.skipif(
tvm.support.libinfo().get("USE_MICRO", "OFF") != "ON",
reason="MicroTVM support not enabled. Set USE_MICRO=ON in config.cmake to enable.",
)
]
return _compose(args, _requires_micro)
def requires_rpc(*args):
"""Mark a test as requiring rpc to run.
Parameters
----------
f : function
Function to mark
"""
_requires_rpc = [
pytest.mark.skipif(
tvm.support.libinfo().get("USE_RPC", "OFF") != "ON",
reason="RPC support not enabled. Set USE_RPC=ON in config.cmake to enable.",
)
]
return _compose(args, _requires_rpc)
def _target_to_requirement(target):
# mapping from target to decorator
if target.startswith("cuda"):
return requires_cuda()
if target.startswith("rocm"):
return requires_rocm()
if target.startswith("vulkan"):
return requires_vulkan()
if target.startswith("nvptx"):
return [*requires_llvm(), *requires_gpu()]
if target.startswith("metal"):
return requires_metal()
if target.startswith("opencl"):
return requires_opencl()
if target.startswith("llvm"):
return requires_llvm()
return []
def parametrize_targets(*args):
"""Parametrize a test over all enabled targets.
Use this decorator when you want your test to be run over a variety of
targets and devices (including cpu and gpu devices).
Parameters
----------
f : function
Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, dev):`,
where `xxxxxxxxx` is any name.
targets : list[str], optional
Set of targets to run against. If not supplied,
:py:func:`tvm.testing.enabled_targets` will be used.
Example
-------
>>> @tvm.testing.parametrize_targets
>>> def test_mytest(target, dev):
>>> ... # do something
Or
>>> @tvm.testing.parametrize_targets("llvm", "cuda")
>>> def test_mytest(target, dev):
>>> ... # do something
"""
def wrap(targets):
def func(f):
params = [
pytest.param(target, tvm.device(target, 0), marks=_target_to_requirement(target))
for target in targets
]
return pytest.mark.parametrize("target,dev", params)(f)
return func
if len(args) == 1 and callable(args[0]):
targets = [t for t, _ in enabled_targets()]
return wrap(targets)(args[0])
return wrap(args)
def identity_after(x, sleep):
"""Testing function to return identity after sleep
Parameters
----------
x : int
The input value.
sleep : float
The amount of time to sleep
Returns
-------
x : object
The original value
"""
if sleep:
time.sleep(sleep)
return x
def terminate_self():
"""Testing function to terminate the process."""
sys.exit(-1)
tvm._ffi._init_api("testing", __name__)
| 32.247396
| 97
| 0.627877
|
ad352ec4cf76e78a60fbac2aad46554d61210be8
| 35,941
|
py
|
Python
|
astropy/io/fits/tests/test_connect.py
|
emirkmo/astropy
|
d96cd45b25ae55117d1bcc9c40e83a82037fc815
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/io/fits/tests/test_connect.py
|
emirkmo/astropy
|
d96cd45b25ae55117d1bcc9c40e83a82037fc815
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/io/fits/tests/test_connect.py
|
emirkmo/astropy
|
d96cd45b25ae55117d1bcc9c40e83a82037fc815
|
[
"BSD-3-Clause"
] | null | null | null |
import gc
import pathlib
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io.fits.column import (_parse_tdisp_format, _fortran_to_python_format,
python_to_tdisp)
from astropy.io.fits import HDUList, PrimaryHDU, BinTableHDU, ImageHDU, table_to_hdu
from astropy.io import fits
from astropy import units as u
from astropy.table import Table, QTable, NdarrayMixin, Column
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units.format.fits import UnitScaleError
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import (AstropyUserWarning,
AstropyDeprecationWarning)
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.time import Time
from astropy.units.quantity import QuantityInfo
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs, serialized_names
# FITS does not preserve precision, in_subfmt, and out_subfmt.
time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {name: (time_attrs if isinstance(col, Time) else compare_attrs[name])
for name, col in mixin_cols.items()}
# FITS does not support multi-element location, array with object dtype,
# or logarithmic quantities.
unsupported_cols = {name: col for name, col in mixin_cols.items()
if (isinstance(col, Time) and col.location.shape != ()
or isinstance(col, np.ndarray) and col.dtype.kind == 'O'
or isinstance(col, u.LogQuantity))}
mixin_cols = {name: col for name, col in mixin_cols.items()
if name not in unsupported_cols}
def equal_data(a, b):
for name in a.dtype.names:
if not np.all(a[name] == b[name]):
return False
return True
class TestSingleTable:
def setup_class(self):
self.data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'U1'), ('c', float)])
def test_simple(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_pathlib(self, tmpdir):
filename = pathlib.Path(str(tmpdir.join('test_simple.fit')))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_meta(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['A'] = 1
t1.meta['B'] = 2.3
t1.meta['C'] = 'spam'
t1.meta['comments'] = ['this', 'is', 'a', 'long', 'comment']
t1.meta['HISTORY'] = ['first', 'second', 'third']
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
for key in t1.meta:
if isinstance(t1.meta, list):
for i in range(len(t1.meta[key])):
assert t1.meta[key][i] == t2.meta[key][i]
else:
assert t1.meta[key] == t2.meta[key]
def test_simple_meta_conflicting(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['ttype1'] = 'spam'
with pytest.warns(AstropyUserWarning, match='Meta-data keyword ttype1 '
'will be ignored since it conflicts with a FITS '
'reserved keyword') as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
def test_simple_noextension(self, tmpdir):
"""
Test that file type is recognized without extension
"""
filename = str(tmpdir.join('test_simple'))
t1 = Table(self.data)
t1.write(filename, overwrite=True, format='fits')
t2 = Table.read(filename)
assert equal_data(t1, t2)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_units(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_units.fits'))
t1 = table_type(self.data)
t1['a'].unit = u.m
t1['c'].unit = u.km / u.s
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].unit == u.m
assert t2['c'].unit == u.km / u.s
def test_with_custom_units_qtable(self, tmpdir):
# Test only for QTable - for Table's Column, new units are dropped
# (as is checked in test_write_drop_nonstandard_units).
filename = str(tmpdir.join('test_with_units.fits'))
unit = u.def_unit('bandpass_sol_lum')
t = QTable()
t['l'] = np.ones(5) * unit
with pytest.warns(AstropyUserWarning) as w:
t.write(filename, overwrite=True)
assert len(w) == 1
assert 'bandpass_sol_lum' in str(w[0].message)
# Just reading back, the data is fine but the unit is not recognized.
with pytest.warns(u.UnitsWarning, match="'bandpass_sol_lum' did not parse") as w:
t2 = QTable.read(filename)
assert len(w) == 1
assert isinstance(t2['l'].unit, u.UnrecognizedUnit)
assert str(t2['l'].unit) == 'bandpass_sol_lum'
assert np.all(t2['l'].value == t['l'].value)
# But if we enable the unit, it should be recognized.
with u.add_enabled_units(unit):
t3 = QTable.read(filename)
assert t3['l'].unit is unit
assert equal_data(t3, t)
# Regression check for #8897; write used to fail when a custom
# unit was enabled.
with pytest.warns(AstropyUserWarning):
t3.write(filename, overwrite=True)
# It should also be possible to read the file in using a unit alias,
# even to a unit that may not be the same.
with u.set_enabled_aliases({'bandpass_sol_lum': u.Lsun}):
t3 = QTable.read(filename)
assert t3['l'].unit is u.Lsun
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_read_with_unit_aliases(self, table_type):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = 'Angstroms'
hdu.columns[2].unit = 'ergs/(cm.s.Angstroms)'
with u.set_enabled_aliases(dict(Angstroms=u.AA, ergs=u.erg)):
t = table_type.read(hdu)
assert t['a'].unit == u.AA
assert t['c'].unit == u.erg/(u.cm*u.s*u.AA)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_format(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_format.fits'))
t1 = table_type(self.data)
t1['a'].format = '{:5d}'
t1['b'].format = '{:>20}'
t1['c'].format = '{:6.2f}'
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].format == '{:5d}'
assert t2['b'].format == '{:>20}'
assert t2['c'].format == '{:6.2f}'
def test_masked(self, tmpdir):
filename = str(tmpdir.join('test_masked.fits'))
t1 = Table(self.data, masked=True)
t1.mask['a'] = [1, 0, 1, 0]
t1.mask['b'] = [1, 0, 0, 1]
t1.mask['c'] = [0, 1, 1, 0]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
@pytest.mark.parametrize('masked', [True, False])
def test_masked_nan(self, masked, tmpdir):
"""Check that masked values by default are replaced by NaN.
This should work for any shape and be independent of whether the
Table is formally masked or not.
"""
filename = str(tmpdir.join('test_masked_nan.fits'))
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1], dtype='f4')
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=['a', 'b', 'c'], masked=masked)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2['a'].data, [np.nan, 8.5, np.nan, 6.25])
assert_array_equal(t2['b'].data, [np.nan, 4.5, 6.75, np.nan])
assert_array_equal(t2['c'].data, np.stack([t2['a'].data, t2['b'].data],
axis=-1))
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
def test_masked_serialize_data_mask(self, tmpdir):
filename = str(tmpdir.join('test_masked_nan.fits'))
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1])
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=['a', 'b', 'c'])
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2['a'].data, [5.25, 8.5, 3.75, 6.25])
assert_array_equal(t2['b'].data, [2.5, 4.5, 6.75, 8.875])
assert_array_equal(t2['c'].data, np.stack([t2['a'].data, t2['b'].data],
axis=-1))
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
def test_read_from_fileobj(self, tmpdir):
filename = str(tmpdir.join('test_read_from_fileobj.fits'))
hdu = BinTableHDU(self.data)
hdu.writeto(filename, overwrite=True)
with open(filename, 'rb') as f:
t = Table.read(f)
assert equal_data(t, self.data)
def test_read_with_nonstandard_units(self):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = 'RADIANS'
hdu.columns[1].unit = 'spam'
hdu.columns[2].unit = 'millieggs'
with pytest.warns(u.UnitsWarning, match="did not parse as fits unit"):
t = Table.read(hdu)
assert equal_data(t, self.data)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_write_drop_nonstandard_units(self, table_type, tmpdir):
# While we are generous on input (see above), we are strict on
# output, dropping units not recognized by the fits standard.
filename = str(tmpdir.join('test_nonstandard_units.fits'))
spam = u.def_unit('spam')
t = table_type()
t['a'] = [1., 2., 3.] * spam
with pytest.warns(AstropyUserWarning, match='spam') as w:
t.write(filename)
assert len(w) == 1
if table_type is Table:
assert ('cannot be recovered in reading. ') in str(w[0].message)
else:
assert 'lost to non-astropy fits readers' in str(w[0].message)
with fits.open(filename) as ff:
hdu = ff[1]
assert 'TUNIT1' not in hdu.header
def test_memmap(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, memmap=False)
t3 = Table.read(filename, memmap=True)
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
@pytest.mark.parametrize('memmap', (False, True))
def test_character_as_bytes(self, tmpdir, memmap):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)
t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)
assert t2['b'].dtype.kind == 'U'
assert t3['b'].dtype.kind == 'S'
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
def test_oned_single_element(self, tmpdir):
filename = str(tmpdir.join('test_oned_single_element.fits'))
table = Table({'x': [[1], [2]]})
table.write(filename, overwrite=True)
read = Table.read(filename)
assert read['x'].shape == (2, 1)
assert len(read['x'][0]) == 1
def test_write_append(self, tmpdir):
t = Table(self.data)
hdu = table_to_hdu(t)
def check_equal(filename, expected, start_from=1):
with fits.open(filename) as hdu_list:
assert len(hdu_list) == expected
for hdu_table in hdu_list[start_from:]:
assert hdu_table.header == hdu.header
assert np.all(hdu_table.data == hdu.data)
filename = str(tmpdir.join('test_write_append.fits'))
t.write(filename, append=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Check the overwrite works correctly.
t.write(filename, append=True, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Normal write, check it's not appending.
t.write(filename, overwrite=True)
t.write(filename, overwrite=True)
check_equal(filename, 2)
# Now write followed by append, with different shaped tables.
t2 = Table(np.array([1, 2]))
t2.write(filename, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3, start_from=2)
assert equal_data(t2, Table.read(filename, hdu=1))
def test_write_overwrite(self, tmpdir):
t = Table(self.data)
filename = str(tmpdir.join('test_write_overwrite.fits'))
t.write(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename)
t.write(filename, overwrite=True)
def test_mask_nans_on_read(self, tmpdir):
filename = str(tmpdir.join('test_inexact_format_parse_on_read.fits'))
c1 = fits.Column(name='a', array=np.array([1, 2, np.nan]), format='E')
table_hdu = fits.TableHDU.from_columns([c1])
table_hdu.writeto(filename)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
def test_mask_null_on_read(self, tmpdir):
filename = str(tmpdir.join('test_null_format_parse_on_read.fits'))
col = fits.Column(name='a', array=np.array([1, 2, 99, 60000], dtype='u2'), format='I', null=99, bzero=32768)
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
class TestMultipleHDU:
def setup_class(self):
self.data1 = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'U1'), ('c', float)])
self.data2 = np.array(list(zip([1.4, 2.3, 3.2, 4.7],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('p', float), ('q', float)])
self.data3 = np.array(list(zip([1, 2, 3, 4],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('A', int), ('B', float)])
hdu0 = PrimaryHDU()
hdu1 = BinTableHDU(self.data1, name='first')
hdu2 = BinTableHDU(self.data2, name='second')
hdu3 = ImageHDU(np.ones((3, 3)), name='third')
hdu4 = BinTableHDU(self.data3)
self.hdus = HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])
self.hdusb = HDUList([hdu0, hdu3, hdu2, hdu1])
self.hdus3 = HDUList([hdu0, hdu3, hdu2])
self.hdus2 = HDUList([hdu0, hdu1, hdu3])
self.hdus1 = HDUList([hdu0, hdu1])
def teardown_class(self):
del self.hdus
def setup_method(self, method):
warnings.filterwarnings('always')
def test_read(self, tmpdir):
filename = str(tmpdir.join('test_read.fits'))
self.hdus.writeto(filename)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)"):
t = Table.read(filename)
assert equal_data(t, self.data1)
filename = str(tmpdir.join('test_read_2.fits'))
self.hdusb.writeto(filename)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)"):
t3 = Table.read(filename)
assert equal_data(t3, self.data2)
def test_read_with_hdu_0(self, tmpdir):
filename = str(tmpdir.join('test_read_with_hdu_0.fits'))
self.hdus.writeto(filename)
with pytest.raises(ValueError) as exc:
Table.read(filename, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_with_hdu_1(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_1.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_with_hdu_2(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_2.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize('hdu', [3, 'third'])
def test_read_with_hdu_3(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_3.fits'))
self.hdus.writeto(filename)
with pytest.raises(ValueError, match='No table found in hdu=3'):
Table.read(filename, hdu=hdu)
def test_read_with_hdu_4(self, tmpdir):
filename = str(tmpdir.join('test_read_with_hdu_4.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=4)
assert equal_data(t, self.data3)
@pytest.mark.parametrize('hdu', [2, 3, '1', 'second', ''])
def test_read_with_hdu_missing(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_1.fits'))
self.hdus1.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)"):
t1 = Table.read(filename, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize('hdu', [0, 2, 'third'])
def test_read_with_hdu_warning(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_2.fits'))
self.hdus2.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)"):
t2 = Table.read(filename, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize('hdu', [0, 1, 'third'])
def test_read_in_last_hdu(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_3.fits'))
self.hdus3.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)"):
t3 = Table.read(filename, hdu=hdu)
assert equal_data(t3, self.data2)
def test_read_from_hdulist(self):
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)"):
t = Table.read(self.hdus)
assert equal_data(t, self.data1)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)"):
t3 = Table.read(self.hdusb)
assert equal_data(t3, self.data2)
def test_read_from_hdulist_with_hdu_0(self):
with pytest.raises(ValueError) as exc:
Table.read(self.hdus, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first', None])
def test_read_from_hdulist_with_single_table(self, hdu):
t = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_from_hdulist_with_hdu_1(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_from_hdulist_with_hdu_2(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize('hdu', [3, 'third'])
def test_read_from_hdulist_with_hdu_3(self, hdu):
with pytest.raises(ValueError, match='No table found in hdu=3'):
Table.read(self.hdus, hdu=hdu)
@pytest.mark.parametrize('hdu', [0, 2, 'third'])
def test_read_from_hdulist_with_hdu_warning(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)"):
t2 = Table.read(self.hdus2, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize('hdu', [2, 3, '1', 'second', ''])
def test_read_from_hdulist_with_hdu_missing(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)"):
t1 = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize('hdu', [0, 1, 'third'])
def test_read_from_hdulist_in_last_hdu(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)"):
t3 = Table.read(self.hdus3, hdu=hdu)
assert equal_data(t3, self.data2)
@pytest.mark.parametrize('hdu', [None, 1, 'first'])
def test_read_from_single_hdu(self, hdu):
t = Table.read(self.hdus[1])
assert equal_data(t, self.data1)
def test_masking_regression_1795():
"""
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked.
"""
t = Table.read(get_pkg_data_filename('data/tb.fits'))
assert np.all(t['c1'].mask == np.array([False, False]))
assert not hasattr(t['c2'], 'mask')
assert not hasattr(t['c3'], 'mask')
assert not hasattr(t['c4'], 'mask')
assert np.all(t['c1'].data == np.array([1, 2]))
assert np.all(t['c2'].data == np.array([b'abc', b'xy ']))
assert_allclose(t['c3'].data, np.array([3.70000007153, 6.6999997139]))
assert np.all(t['c4'].data == np.array([False, True]))
def test_scale_error():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ['x', 'y', 'z']
t = Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})
t['a'].unit = '1.2'
with pytest.raises(UnitScaleError, match=r"The column 'a' could not be "
r"stored in FITS format because it has a scale '\(1\.2\)'"
r" that is not recognized by the FITS standard\. Either "
r"scale the data or change the units\."):
t.write('t.fits', format='fits', overwrite=True)
@pytest.mark.parametrize('tdisp_str, format_return',
[('EN10.5', ('EN', '10', '5', None)),
('F6.2', ('F', '6', '2', None)),
('B5.10', ('B', '5', '10', None)),
('E10.5E3', ('E', '10', '5', '3')),
('A21', ('A', '21', None, None))])
def test_parse_tdisp_format(tdisp_str, format_return):
assert _parse_tdisp_format(tdisp_str) == format_return
@pytest.mark.parametrize('tdisp_str, format_str_return',
[('G15.4E2', '{:15.4g}'),
('Z5.10', '{:5x}'),
('I6.5', '{:6d}'),
('L8', '{:>8}'),
('E20.7', '{:20.7e}')])
def test_fortran_to_python_format(tdisp_str, format_str_return):
assert _fortran_to_python_format(tdisp_str) == format_str_return
@pytest.mark.parametrize('fmt_str, tdisp_str',
[('{:3d}', 'I3'),
('3d', 'I3'),
('7.3f', 'F7.3'),
('{:>4}', 'A4'),
('{:7.4f}', 'F7.4'),
('%5.3g', 'G5.3'),
('%10s', 'A10'),
('%.4f', 'F13.4')])
def test_python_to_tdisp(fmt_str, tdisp_str):
assert python_to_tdisp(fmt_str) == tdisp_str
def test_logical_python_to_tdisp():
assert python_to_tdisp('{:>7}', logical_dtype=True) == 'L7'
def test_bool_column(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table.
"""
arr = np.ones(5, dtype=bool)
    arr[::2] = np.False_
t = Table([arr])
t.write(str(tmpdir.join('test.fits')), overwrite=True)
with fits.open(str(tmpdir.join('test.fits'))) as hdul:
assert hdul[1].data['col0'].dtype == np.dtype('bool')
assert np.all(hdul[1].data['col0'] == arr)
def test_unicode_column(tmpdir):
"""
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228
"""
t = Table([np.array(['a', 'b', 'cd'])])
t.write(str(tmpdir.join('test.fits')), overwrite=True)
with fits.open(str(tmpdir.join('test.fits'))) as hdul:
assert np.all(hdul[1].data['col0'] == ['a', 'b', 'cd'])
assert hdul[1].header['TFORM1'] == '2A'
t2 = Table([np.array(['\N{SNOWMAN}'])])
with pytest.raises(UnicodeEncodeError):
t2.write(str(tmpdir.join('test.fits')), overwrite=True)
def test_unit_warnings_read_write(tmpdir):
filename = str(tmpdir.join('test_unit.fits'))
t1 = Table([[1, 2], [3, 4]], names=['a', 'b'])
t1['a'].unit = 'm/s'
t1['b'].unit = 'not-a-unit'
with pytest.warns(u.UnitsWarning, match="'not-a-unit' did not parse as fits unit") as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
with pytest.warns(u.UnitsWarning, match="'not-a-unit' did not parse as fits unit") as w:
Table.read(filename, hdu=1)
def test_convert_comment_convention(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = get_pkg_data_filename('data/stddata.fits')
with pytest.warns(AstropyUserWarning, match=r'hdu= was not specified but '
r'multiple tables are present'):
t = Table.read(filename)
assert t.meta['comments'] == [
'',
' *** End of mandatory fields ***',
'',
'',
' *** Column names ***',
'',
'',
' *** Column formats ***',
''
]
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict(); #6720
        # would fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-15)
else:
assert np.all(a1 == a2)
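# Minimal illustration of how assert_objects_equal resolves dotted attribute
# paths (sketch only; the columns below are hypothetical and not part of the
# test data in this module):
#
# >>> c1 = Column([1, 2], unit='m', description='spam')
# >>> c2 = Column([1, 2], unit='m', description='spam')
# >>> assert_objects_equal(c1, c2, attrs=['data'])  # also checks the info.* attrs
#
# Each entry in ``attrs`` (plus the info.* attributes) is resolved with getattr,
# falling back to item access for dict-like values such as info.meta.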
def test_fits_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='fits')
t2 = Table.read(filename, format='fits', astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
# FITS stores times directly, so we just get the column back.
all_serialized_names = []
for name in sorted(mixin_cols):
all_serialized_names.extend(
[name] if isinstance(mixin_cols[name], Time)
else serialized_names[name])
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['HISTORY'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
with fits.open(filename) as hdus:
assert hdus[1].columns.names == all_serialized_names
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.fits'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my \n\n\n description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
if isinstance(col, Time):
# FITS Time does not preserve format
t2[name].format = col.format
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.parametrize('name_col', unsupported_cols.items())
@pytest.mark.xfail(reason='column type unsupported')
def test_fits_unsupported_mixin(name_col, tmpdir):
# Check that we actually fail in writing unsupported columns defined
# on top.
filename = str(tmpdir.join('test_simple.fits'))
name, col = name_col
Table([col], names=[name]).write(filename, format='fits')
def test_info_attributes_with_no_mixins(tmpdir):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = str(tmpdir.join('test.fits'))
t = Table([[1.0, 2.0]])
t['col0'].description = 'hello' * 40
t['col0'].format = '{:8.4f}'
t['col0'].meta['a'] = {'b': 'c'}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2['col0'].description == 'hello' * 40
assert t2['col0'].format == '{:8.4f}'
assert t2['col0'].meta['a'] == {'b': 'c'}
@pytest.mark.parametrize('method', ['set_cols', 'names', 'class'])
def test_round_trip_masked_table_serialize_mask(tmpdir, method):
"""
Same as previous test but set the serialize_method to 'data_mask' so mask is
written out and the behavior is all correct.
"""
filename = str(tmpdir.join('test.fits'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
    # MaskedColumn but no masked elements. See the MaskedColumnInfo class
    # _represent_as_dict() method for info about why we test a column with no masked elements.
t['d'] = [1, 2, 3]
if method == 'set_cols':
for col in t.itercols():
col.info.serialize_method['fits'] = 'data_mask'
t.write(filename)
elif method == 'names':
t.write(filename, serialize_method={'a': 'data_mask', 'b': 'data_mask',
'c': 'data_mask', 'd': 'data_mask'})
elif method == 'class':
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
def test_meta_not_modified(tmpdir):
filename = str(tmpdir.join('test.fits'))
t = Table(data=[Column([1, 2], 'a', description='spam')])
t.meta['comments'] = ['a', 'b']
assert len(t.meta) == 1
t.write(filename)
assert len(t.meta) == 1
assert t.meta['comments'] == ['a', 'b']
| 39.845898
| 116
| 0.58699
|
1cb80c0ce61c5a98818e4a429c09f9a77de9aa99
| 1,779
|
py
|
Python
|
myerp/nael/series.py
|
Ouahdani/myerp
|
9c14f193b6cab26968df088c18c8e745e75844b6
|
[
"MIT"
] | null | null | null |
myerp/nael/series.py
|
Ouahdani/myerp
|
9c14f193b6cab26968df088c18c8e745e75844b6
|
[
"MIT"
] | null | null | null |
myerp/nael/series.py
|
Ouahdani/myerp
|
9c14f193b6cab26968df088c18c8e745e75844b6
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr
from frappe import msgprint, throw, _
from frappe.model.document import Document
from frappe.model.naming import parse_naming_series
from frappe.permissions import get_doctypes_with_read
from erpnext.setup.doctype.naming_series.naming_series import NamingSeries
def scrub_options_list(self, ol):
serial_list = filter(lambda x: x, [cstr(n).strip() for n in ol])
liste = list(serial_list)
return liste
def get_current(self, arg=None):
"""get series current"""
if self.prefix:
prefix = self.parse_naming_series()
self.current_value = frappe.db.get_value("Series",
prefix, "current", order_by = "name")
self.code_journal = frappe.db.get_value("Series",
prefix, "code_jour", order_by = "name")
def update_series_start(self):
if self.prefix:
prefix = self.parse_naming_series()
self.insert_series(prefix)
frappe.db.sql("update `tabSeries` set current = %s where name = %s",
(self.current_value, prefix))
frappe.db.sql("update `tabSeries` set code_jour = %s where name = %s",
(self.code_journal, prefix))
msgprint(_("Series Updated Successfully"))
else:
msgprint(_("Please select prefix first"))
@frappe.whitelist(allow_guest=False)
def build_options(self):
"""update series list"""
NamingSeries.scrub_options_list = scrub_options_list
@frappe.whitelist(allow_guest=False)
def build_code_journal(self):
"""update series list"""
NamingSeries.get_current = get_current
NamingSeries.update_series_start = update_series_start
| 33.566038
| 82
| 0.722878
|
1db77e4ca8fbfedba603bc7188cc7d5007e5d762
| 3,721
|
py
|
Python
|
pymatviz/correlation.py
|
janosh/pymatviz
|
17742d161151db1f9e1d615e6f19a3eac678be27
|
[
"MIT"
] | null | null | null |
pymatviz/correlation.py
|
janosh/pymatviz
|
17742d161151db1f9e1d615e6f19a3eac678be27
|
[
"MIT"
] | 8
|
2022-03-01T21:11:01.000Z
|
2022-03-20T16:46:46.000Z
|
pymatviz/correlation.py
|
janosh/pymatviz
|
17742d161151db1f9e1d615e6f19a3eac678be27
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from pymatviz.utils import NumArray
def marchenko_pastur_pdf(x: float, gamma: float, sigma: float = 1) -> float:
"""The Marchenko-Pastur probability density function describes the
distribution of singular values of large rectangular random matrices.
See https://wikipedia.org/wiki/Marchenko-Pastur_distribution.
By comparing the eigenvalue distribution of a correlation matrix to this
PDF, one can gauge the significance of correlations.
Args:
x (float): Position at which to compute probability density.
gamma (float): Also referred to as lambda. The distribution's main parameter
that measures how well sampled the data is.
sigma (float, optional): Standard deviation of random variables assumed
to be independent identically distributed. Defaults to 1 as
appropriate for correlation matrices.
Returns:
float: Marchenko-Pastur density for given gamma at x
"""
    lambda_m = (sigma * (1 - np.sqrt(1 / gamma))) ** 2  # Smallest eigenvalue
    lambda_p = (sigma * (1 + np.sqrt(1 / gamma))) ** 2  # Largest eigenvalue
    if x < lambda_m or x > lambda_p:
        # The density is zero outside the support [lambda_m, lambda_p]; returning
        # early also avoids taking the square root of a negative number.
        return 0
    prefac = gamma / (2 * np.pi * sigma**2 * x)
    root = np.sqrt((lambda_p - x) * (x - lambda_m))
    return prefac * root
def marchenko_pastur(
matrix: NumArray,
gamma: float,
sigma: float = 1,
filter_high_evals: bool = False,
ax: Axes = None,
) -> Axes:
"""Plot the eigenvalue distribution of a symmetric matrix (usually a correlation
matrix) against the Marchenko Pastur distribution.
The probability of a random matrix having eigenvalues >= (1 + sqrt(gamma))^2 in the
absence of any signal is vanishingly small. Thus, if eigenvalues larger than that
appear, they correspond to statistically significant signals.
Args:
matrix (Array): 2d array
        gamma (float): The Marchenko-Pastur ratio of the number of observations
            to the number of random variables. E.g. for N=1000 variables and
            p=500 observations of each, gamma = p/N = 1/2.
sigma (float, optional): Standard deviation of random variables. Defaults to 1.
filter_high_evals (bool, optional): Whether to filter out eigenvalues larger
than theoretical random maximum. Useful for focusing the plot on the area
of the MP PDF. Defaults to False.
ax (Axes, optional): matplotlib Axes on which to plot. Defaults to None.
Returns:
ax: The plot's matplotlib Axes.
"""
if ax is None:
ax = plt.gca()
# use eigvalsh for speed since correlation matrix is symmetric
evals = np.linalg.eigvalsh(matrix)
    lambda_m = (sigma * (1 - np.sqrt(1 / gamma))) ** 2  # Smallest eigenvalue
    lambda_p = (sigma * (1 + np.sqrt(1 / gamma))) ** 2  # Largest eigenvalue
if filter_high_evals:
# Remove eigenvalues larger than those expected in a purely random matrix
evals = evals[evals <= lambda_p + 1]
ax.hist(evals, bins=50, edgecolor="black", density=True)
# Plot the theoretical density
mp_pdf = np.vectorize(lambda x: marchenko_pastur_pdf(x, gamma, sigma))
x = np.linspace(max(1e-4, lambda_m), lambda_p, 200)
ax.plot(x, mp_pdf(x), linewidth=5)
# Compute and display matrix rank
# A ratio less than one indicates an undersampled set of RVs
rank = np.linalg.matrix_rank(matrix)
n_rows = len(matrix)
plt.text(
*[0.95, 0.9],
f"rank deficiency: {rank}/{n_rows} {'(None)' if n_rows == rank else ''}",
transform=ax.transAxes,
ha="right",
)
return ax
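if __name__ == "__main__":
    # Usage sketch (illustrative driver, not part of the library API): compare the
    # eigenvalue spectrum of a purely random correlation matrix against the MP
    # density. Here p observations of n variables give gamma = p / n; the data
    # below is synthetic noise, so no eigenvalue should exceed the upper MP edge.
    rng = np.random.default_rng(0)
    n_vars, n_obs = 100, 500
    data = rng.standard_normal((n_obs, n_vars))
    corr = np.corrcoef(data, rowvar=False)  # (n_vars, n_vars) correlation matrix
    marchenko_pastur(corr, gamma=n_obs / n_vars)
    plt.show()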
| 37.585859
| 87
| 0.667831
|
9146d8a404a7bc18bf0bae5451d02232c9090827
| 977
|
py
|
Python
|
geni-lib/samples/userdata.py
|
AERPAW-Platform-Control/gateway
|
a80a25bb54a7cede82673f2385bb73d5aaa4963a
|
[
"MIT"
] | null | null | null |
geni-lib/samples/userdata.py
|
AERPAW-Platform-Control/gateway
|
a80a25bb54a7cede82673f2385bb73d5aaa4963a
|
[
"MIT"
] | null | null | null |
geni-lib/samples/userdata.py
|
AERPAW-Platform-Control/gateway
|
a80a25bb54a7cede82673f2385bb73d5aaa4963a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import geni.portal as portal
import geni.rspec.pg as rspec
import geni.rspec.igext as IG
import geni.rspec.emulab as emulab
from lxml import etree as ET
pc = portal.Context()
request = rspec.Request()
pc.defineParameter("param1", "dsc1", portal.ParameterType.INTEGER, 1)
pc.defineParameter("param2", "dsc2", portal.ParameterType.STRING, "value2")
params = pc.bindParameters()
ele2 = ET.Element("xmlstuff")
ET.SubElement(ele2, "evenmorexml")
node1 = IG.XenVM("node1")
iface1 = node1.addInterface("if1")
# Add user data to node1 in a single line
node1.UserData(emulab.UserDataSet({"data1":ele2, "data2":"val2"}))
link = rspec.Link("link")
link.addInterface(iface1)
# Add user data to link over several lines
linkdata = emulab.UserDataSet()
linkdata.addData("linkdata1", "val1")
linkdata.addData("linkdata2", "val2")
link.UserData(linkdata)
request.addResource(node1)
request.addResource(link)
pc.verifyParameters()
pc.printRequestRSpec(request)
| 23.829268
| 75
| 0.756397
|
73e9e4d8944439ed2bb807e81cd7d9d4db3b4ff7
| 1,537
|
py
|
Python
|
tests/properties_search_test.py
|
FabianoBFCarvalho/api-python
|
fbe8005da2207c45b1f2ca593a160e5f74d35171
|
[
"Apache-2.0"
] | null | null | null |
tests/properties_search_test.py
|
FabianoBFCarvalho/api-python
|
fbe8005da2207c45b1f2ca593a160e5f74d35171
|
[
"Apache-2.0"
] | null | null | null |
tests/properties_search_test.py
|
FabianoBFCarvalho/api-python
|
fbe8005da2207c45b1f2ca593a160e5f74d35171
|
[
"Apache-2.0"
] | null | null | null |
from google.appengine.ext import testbed
import webapp2
import webtest
import unittest
import json
from google.appengine.api import search
from views.properties_search import PropertiesSearch
class AppTest(unittest.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([(r'/properties/search', PropertiesSearch)])
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_search_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def test_get_found(self):
doc = search.Document(doc_id='123456', fields=[
search.TextField(name='address', value='Rua Bela Flor'),
search.NumberField(name='value', value=50000)
])
search.Index(name='properties', namespace='ac-abc123').put(doc)
response = self.testapp.get('/properties/search?value_max=6000000')
print(response)
self.assertEqual(json.loads(response.body)['count'], 1)
def test_get_not_found(self):
doc = search.Document(doc_id='123456', fields=[
search.TextField(name='address', value='Rua Bela Flor'),
search.NumberField(name='vacancies', value=2)
])
search.Index(name='properties', namespace='ac-abc123').put(doc)
response = self.testapp.get('/properties/search?vacancies_min=3')
self.assertEqual(json.loads(response.body)['count'], 0)
| 36.595238
| 82
| 0.672088
|
3cb48c3d37e0c74ef7d5507c35724ab4d95cf448
| 797
|
py
|
Python
|
vint/linting/env.py
|
ViViDboarder/vint
|
b577a623e9279d44eb3dc81e6817c8eec25add63
|
[
"MIT"
] | null | null | null |
vint/linting/env.py
|
ViViDboarder/vint
|
b577a623e9279d44eb3dc81e6817c8eec25add63
|
[
"MIT"
] | null | null | null |
vint/linting/env.py
|
ViViDboarder/vint
|
b577a623e9279d44eb3dc81e6817c8eec25add63
|
[
"MIT"
] | null | null | null |
import sys
import os
import os.path
from pathlib import Path
from vint.linting.file_filter import find_vim_script
def build_environment(cmdargs):
return {
'cmdargs': cmdargs,
'home_path': _get_home_path(),
'xdg_config_home': _get_xdg_config_home(),
'cwd': _get_cwd(),
'file_paths': _get_file_paths(cmdargs),
}
def _get_cwd():
return Path(os.getcwd())
def _get_home_path():
return Path(os.path.expanduser('~'))
def _get_file_paths(cmdargs):
if 'files' not in cmdargs:
return []
found_file_paths = find_vim_script(map(Path, cmdargs['files']))
return found_file_paths
def _get_xdg_config_home():
return Path(os.environ.get(
"XDG_CONFIG_HOME",
str(_get_home_path().joinpath(".config"))
))
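# Illustrative call (hypothetical paths, shown as a sketch only):
#
# >>> build_environment({'files': ['plugin/foo.vim']})
# {'cmdargs': {'files': ['plugin/foo.vim']},
#  'home_path': PosixPath('/home/user'),
#  'xdg_config_home': PosixPath('/home/user/.config'),
#  'cwd': PosixPath('/current/dir'),
#  'file_paths': [PosixPath('plugin/foo.vim')]}
#
# 'file_paths' only contains entries that find_vim_script actually recognizes
# as Vim script files on disk, so it may be shorter than cmdargs['files'].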
| 19.925
| 67
| 0.664994
|
cc68e11debd722809be961ddcdc35a5803489b73
| 371
|
py
|
Python
|
solutions/python3/312.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/312.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/312.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
class Solution:
def maxCoins(self, nums):
memo, nums = {}, [1] + [num for num in nums if num] + [1]
def dfs(l, r):
if r - l == 1: return 0
if (l, r) not in memo: memo[(l, r)] = max(nums[l] * nums[i] * nums[r] + dfs(l, i) + dfs(i, r) for i in range(l + 1, r))
return memo[(l, r)]
return dfs(0, len(nums) - 1)
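if __name__ == "__main__":
    # Quick sanity check (illustrative driver, not part of the original solution):
    # bursting [3, 1, 5, 8] optimally yields 3*1*5 + 3*5*8 + 1*3*8 + 1*8*1 = 167.
    print(Solution().maxCoins([3, 1, 5, 8]))  # expected: 167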
| 46.375
| 131
| 0.466307
|
5c3a6b9cd84603fc89f38a11b1527259457b7d10
| 17,645
|
py
|
Python
|
st2client/st2client/shell.py
|
momokuri-3/st2
|
0a7038723d701b433d7079b843cc76d4bf1ae8c9
|
[
"Apache-2.0"
] | 4,920
|
2015-01-01T15:12:17.000Z
|
2022-03-31T19:31:15.000Z
|
st2client/st2client/shell.py
|
momokuri-3/st2
|
0a7038723d701b433d7079b843cc76d4bf1ae8c9
|
[
"Apache-2.0"
] | 3,563
|
2015-01-05T19:02:19.000Z
|
2022-03-31T19:23:09.000Z
|
st2client/st2client/shell.py
|
momokuri-3/st2
|
0a7038723d701b433d7079b843cc76d4bf1ae8c9
|
[
"Apache-2.0"
] | 774
|
2015-01-01T20:41:24.000Z
|
2022-03-31T13:25:29.000Z
|
#!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line interface to StackStorm.
"""
from __future__ import print_function
from __future__ import absolute_import
# Ignore CryptographyDeprecationWarning warnings which appear on older versions of Python 2.7
import warnings
from cryptography.utils import CryptographyDeprecationWarning
warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)
import os
import sys
import argcomplete
import argparse
import logging
import locale
import six
from six.moves.configparser import ConfigParser
from st2client import __version__
from st2client import models
from st2client.base import BaseCLIApp
from st2client.commands import auth
from st2client.commands import action
from st2client.commands import action_alias
from st2client.commands import keyvalue
from st2client.commands import inquiry
from st2client.commands import pack
from st2client.commands import policy
from st2client.commands import resource
from st2client.commands import sensor
from st2client.commands import trace
from st2client.commands import trigger
from st2client.commands import triggerinstance
from st2client.commands import timer
from st2client.commands import webhook
from st2client.commands import rule
from st2client.commands import rule_enforcement
from st2client.commands import rbac
from st2client.commands import workflow
from st2client.commands import service_registry
from st2client.config import set_config
from st2client.exceptions.operations import OperationFailureException
from st2client.utils.logging import LogLevelFilter, set_log_level_for_all_loggers
from st2client.utils.misc import reencode_list_with_surrogate_escape_sequences
from st2client.commands.auth import TokenCreateCommand
from st2client.commands.auth import LoginCommand
from st2client.utils.profiler import setup_regular_profiler
__all__ = ["Shell"]
LOGGER = logging.getLogger(__name__)
CLI_DESCRIPTION = (
"CLI for StackStorm event-driven automation platform. https://stackstorm.com"
)
USAGE_STRING = """
Usage: %(prog)s [options] <command> <sub command> [options]
For example:
%(prog)s action list --pack=st2
%(prog)s run core.local cmd=date
%(prog)s --debug run core.local cmd=date
""".strip()
NON_UTF8_LOCALE = (
"""
Locale %s with encoding %s which is not UTF-8 is used. This means that some functionality which
relies on outputting unicode characters won't work.
You are encouraged to use UTF-8 locale by setting LC_ALL environment variable to en_US.UTF-8 or
similar.
""".strip()
.replace("\n", " ")
.replace(" ", " ")
)
PACKAGE_METADATA_FILE_PATH = "/opt/stackstorm/st2/package.meta"
"""
Here we sanitize the provided args and ensure they contain valid unicode values.
By default, sys.argv will contain a unicode string where the actual item values which contain
unicode sequences are escaped using unicode surrogates.
For example, if "examples.test_rule_utf8_náme" value is specified as a CLI argument, sys.argv
and as such also url, would contain "examples.test_rule_utf8_n%ED%B3%83%ED%B2%A1me" which is not
what we want.
Complete sys.argv example:
1. Default - ['shell.py', '--debug', 'rule', 'get', 'examples.test_rule_utf8_n\udcc3\udca1me']
2. What we want - ['shell.py', '--debug', 'rule', 'get', 'examples.test_rule_utf8_náme']
This won't work correctly when sending requests to the API. As such, we correctly escape the
value to the unicode string here and then let the http layer (requests) correctly url encode
this value.
Technically, we could also just try to re-encode it in the HTTPClient and I tried that first, but
it turns out more code in the client results in exceptions if it's not re-encoded as early as
possible.
"""
REENCODE_ARGV = os.environ.get("ST2_CLI_RENCODE_ARGV", "true").lower() in [
"true",
"1",
"yes",
]
if REENCODE_ARGV:
try:
sys.argv = reencode_list_with_surrogate_escape_sequences(sys.argv)
except Exception as e:
print("Failed to re-encode sys.argv: %s" % (str(e)))
def get_stackstorm_version():
"""
Return StackStorm version including git commit revision if running a dev release and a file
with package metadata which includes git revision is available.
:rtype: ``str``
"""
if "dev" in __version__:
version = __version__
if not os.path.isfile(PACKAGE_METADATA_FILE_PATH):
return version
config = ConfigParser()
try:
config.read(PACKAGE_METADATA_FILE_PATH)
except Exception:
return version
try:
git_revision = config.get("server", "git_sha")
except Exception:
return version
version = "%s (%s)" % (version, git_revision)
else:
version = __version__
return version
class Shell(BaseCLIApp):
LOG = LOGGER
SKIP_AUTH_CLASSES = [
TokenCreateCommand.__name__,
LoginCommand.__name__,
]
def __init__(self):
# Set up of endpoints is delayed until program is run.
self.client = None
# Set up the main parser.
self.parser = argparse.ArgumentParser(description=CLI_DESCRIPTION)
# Set up general program options.
self.parser.add_argument(
"--version",
action="version",
version="%(prog)s {version}, on Python {python_major}.{python_minor}.{python_patch}".format(
version=get_stackstorm_version(),
python_major=sys.version_info.major,
python_minor=sys.version_info.minor,
python_patch=sys.version_info.micro,
),
)
self.parser.add_argument(
"--url",
action="store",
dest="base_url",
default=None,
help="Base URL for the API servers. Assumes all servers use the "
"same base URL and default ports are used. Get ST2_BASE_URL "
"from the environment variables by default.",
)
self.parser.add_argument(
"--auth-url",
action="store",
dest="auth_url",
default=None,
help="URL for the authentication service. Get ST2_AUTH_URL "
"from the environment variables by default.",
)
self.parser.add_argument(
"--api-url",
action="store",
dest="api_url",
default=None,
help="URL for the API server. Get ST2_API_URL "
"from the environment variables by default.",
)
self.parser.add_argument(
"--stream-url",
action="store",
dest="stream_url",
default=None,
help="URL for the stream endpoint. Get ST2_STREAM_URL"
"from the environment variables by default.",
)
self.parser.add_argument(
"--api-version",
action="store",
dest="api_version",
default=None,
help="API version to use. Get ST2_API_VERSION "
"from the environment variables by default.",
)
self.parser.add_argument(
"--cacert",
action="store",
dest="cacert",
default=None,
help="Path to the CA cert bundle for the SSL endpoints. "
"Get ST2_CACERT from the environment variables by default. "
"If this is not provided, then SSL cert will not be verified.",
)
self.parser.add_argument(
"--config-file",
action="store",
dest="config_file",
default=None,
help="Path to the CLI config file",
)
self.parser.add_argument(
"--print-config",
action="store_true",
dest="print_config",
default=False,
help="Parse the config file and print the values",
)
self.parser.add_argument(
"--skip-config",
action="store_true",
dest="skip_config",
default=False,
help="Don't parse and use the CLI config file",
)
self.parser.add_argument(
"--debug",
action="store_true",
dest="debug",
default=False,
help="Enable debug mode",
)
# Set up list of commands and subcommands.
self.subparsers = self.parser.add_subparsers(dest="parser")
self.subparsers.required = True
self.commands = {}
self.commands["run"] = action.ActionRunCommand(
models.Action, self, self.subparsers, name="run", add_help=False
)
self.commands["action"] = action.ActionBranch(
"An activity that happens as a response to the external event.",
self,
self.subparsers,
)
self.commands["action-alias"] = action_alias.ActionAliasBranch(
"Action aliases.", self, self.subparsers
)
self.commands["auth"] = auth.TokenCreateCommand(
models.Token, self, self.subparsers, name="auth"
)
self.commands["login"] = auth.LoginCommand(
models.Token, self, self.subparsers, name="login"
)
self.commands["whoami"] = auth.WhoamiCommand(
models.Token, self, self.subparsers, name="whoami"
)
self.commands["api-key"] = auth.ApiKeyBranch("API Keys.", self, self.subparsers)
self.commands["execution"] = action.ActionExecutionBranch(
"An invocation of an action.", self, self.subparsers
)
self.commands["inquiry"] = inquiry.InquiryBranch(
"Inquiries provide an opportunity to ask a question "
"and wait for a response in a workflow.",
self,
self.subparsers,
)
self.commands["key"] = keyvalue.KeyValuePairBranch(
"Key value pair is used to store commonly used configuration "
"for reuse in sensors, actions, and rules.",
self,
self.subparsers,
)
self.commands["pack"] = pack.PackBranch(
"A group of related integration resources: " "actions, rules, and sensors.",
self,
self.subparsers,
)
self.commands["policy"] = policy.PolicyBranch(
"Policy that is enforced on a resource.", self, self.subparsers
)
self.commands["policy-type"] = policy.PolicyTypeBranch(
"Type of policy that can be applied to resources.", self, self.subparsers
)
self.commands["rule"] = rule.RuleBranch(
'A specification to invoke an "action" on a "trigger" selectively '
"based on some criteria.",
self,
self.subparsers,
)
self.commands["webhook"] = webhook.WebhookBranch(
"Webhooks.", self, self.subparsers
)
self.commands["timer"] = timer.TimerBranch("Timers.", self, self.subparsers)
self.commands["runner"] = resource.ResourceBranch(
models.RunnerType,
"Runner is a type of handler for a specific class of actions.",
self,
self.subparsers,
read_only=True,
has_disable=True,
)
self.commands["sensor"] = sensor.SensorBranch(
"An adapter which allows you to integrate StackStorm with external system.",
self,
self.subparsers,
)
self.commands["trace"] = trace.TraceBranch(
"A group of executions, rules and triggerinstances that are related.",
self,
self.subparsers,
)
self.commands["trigger"] = trigger.TriggerTypeBranch(
"An external event that is mapped to a st2 input. It is the "
"st2 invocation point.",
self,
self.subparsers,
)
self.commands["trigger-instance"] = triggerinstance.TriggerInstanceBranch(
"Actual instances of triggers received by st2.", self, self.subparsers
)
self.commands["rule-enforcement"] = rule_enforcement.RuleEnforcementBranch(
"Models that represent enforcement of rules.", self, self.subparsers
)
self.commands["workflow"] = workflow.WorkflowBranch(
"Commands for workflow authoring related operations. "
"Only orquesta workflows are supported.",
self,
self.subparsers,
)
# Service Registry
self.commands["service-registry"] = service_registry.ServiceRegistryBranch(
"Service registry group and membership related commands.",
self,
self.subparsers,
)
# RBAC
self.commands["role"] = rbac.RoleBranch("RBAC roles.", self, self.subparsers)
self.commands["role-assignment"] = rbac.RoleAssignmentBranch(
"RBAC role assignments.", self, self.subparsers
)
def run(self, argv):
debug = False
parser = self.parser
if len(argv) == 0:
# Print a more user-friendly help string if no arguments are provided
# Note: We only set usage variable for the main parser. If we passed "usage" argument
# to the main ArgumentParser class above, this would also set a custom usage string for
# sub-parsers which we don't want.
parser.usage = USAGE_STRING
sys.stderr.write(parser.format_help())
return 2
# Provide autocomplete for shell
argcomplete.autocomplete(self.parser)
if "--print-config" in argv:
# Hack because --print-config requires no command to be specified
argv = argv + ["action", "list"]
# Parse command line arguments.
args = self.parser.parse_args(args=argv)
print_config = args.print_config
if print_config:
self._print_config(args=args)
return 3
# Parse config and store it in the config module
config = self._parse_config_file(args=args, validate_config_permissions=False)
set_config(config=config)
self._check_locale_and_print_warning()
# Setup client and run the command
try:
debug = getattr(args, "debug", False)
if debug:
set_log_level_for_all_loggers(level=logging.DEBUG)
# Set up client.
self.client = self.get_client(args=args, debug=debug)
# TODO: This is not so nice work-around for Python 3 because of a breaking change in
# Python 3 - https://bugs.python.org/issue16308
try:
func = getattr(args, "func")
except AttributeError:
parser.print_help()
sys.exit(2)
# Execute command.
func(args)
return 0
except OperationFailureException:
if debug:
self._print_debug_info(args=args)
return 2
except Exception as e:
# We allow exception to define custom exit codes
exit_code = getattr(e, "exit_code", 1)
print("ERROR: %s\n" % e)
if debug:
self._print_debug_info(args=args)
return exit_code
def _print_config(self, args):
config = self._parse_config_file(args=args)
for section, options in six.iteritems(config):
print("[%s]" % (section))
for name, value in six.iteritems(options):
print("%s = %s" % (name, value))
def _check_locale_and_print_warning(self):
"""
Method which checks that unicode locale is used and prints a warning if it's not.
"""
try:
default_locale = locale.getdefaultlocale()[0]
preferred_encoding = locale.getpreferredencoding()
except ValueError:
# Ignore unknown locale errors for now
default_locale = "unknown"
preferred_encoding = "unknown"
if preferred_encoding and preferred_encoding.lower() != "utf-8":
msg = NON_UTF8_LOCALE % (default_locale or "unknown", preferred_encoding)
            LOGGER.warning(msg)
def setup_logging(argv):
debug = "--debug" in argv
root = LOGGER
root.setLevel(logging.WARNING)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.WARNING)
formatter = logging.Formatter("%(asctime)s %(levelname)s - %(message)s")
handler.setFormatter(formatter)
if not debug:
handler.addFilter(LogLevelFilter(log_levels=[logging.ERROR]))
root.addHandler(handler)
def main(argv=sys.argv[1:]):
setup_logging(argv)
if "--enable-profiler" in sys.argv:
setup_regular_profiler(service_name="st2cli")
sys.argv.remove("--enable-profiler")
argv.remove("--enable-profiler")
return Shell().run(argv)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 32.31685
| 104
| 0.630037
|
bd195965377e666d8f396b4c974e6d54af054338
| 1,670
|
py
|
Python
|
russia (1).py
|
dmliu16/geotunes
|
8cfc9d7760e7a8050aace7a405e65bec7d287982
|
[
"CC-BY-3.0"
] | 1
|
2021-05-21T02:18:42.000Z
|
2021-05-21T02:18:42.000Z
|
russia (1).py
|
dmliu16/geotunes
|
8cfc9d7760e7a8050aace7a405e65bec7d287982
|
[
"CC-BY-3.0"
] | null | null | null |
russia (1).py
|
dmliu16/geotunes
|
8cfc9d7760e7a8050aace7a405e65bec7d287982
|
[
"CC-BY-3.0"
] | null | null | null |
# russia
year1 = [["Ёлка","Прованс"],["Слава","Одиночество"],["Serebro","Я Тебя Не Отдам"],
["Дима Билан","Болен Тобой"],["Фабрика","А Я За Тобой"],["Сати Казанова","До Рассвета"],
["Пицца","Оружие"],["Винтаж","Знак Водолея"],["30.02","Звёзды В Лужах"],
["Сати Казанова","Чувство Лёгкости"]]
import random
lst = []
for i in range(63):
lst += [random.random()]
[0.039477745253817975, 0.7253942868919809, 0.637431006602568, 0.24876082057842752,
0.3611270619481991, 0.627941214267894, 0.9933643338741133, 0.0017147308563305597,
0.9792487260861582, 0.021980251317077926, 0.14437472761371994, 0.3413503048228216,
0.4730525016600159, 0.9360244331613186, 0.7725561139743328, 0.03507098458602509,
0.043370852879301425, 0.6414286566026913, 0.9805431488190012, 0.015752550740476212,
0.22877750983843026, 0.4216047703141541, 0.7950590457794464, 0.9767587987622901,
0.14204156273869217, 0.23032827703430325, 0.7969954186812338, 0.2109605613434331,
0.4289935924926075, 0.8691259576715332, 0.8070482958439469, 0.9857047090844336,
0.7484259079401977, 0.08523226376385684, 0.8611063848116214, 0.20617774324250382,
0.22060103394663377, 0.38888958788648065, 0.17308258940252086, 0.7654579144508673,
0.5405284494755208, 0.8712037075997128, 0.25325275527263136, 0.47323600257460463,
0.6953493313423972, 0.12232588519059617, 0.33338878477762435, 0.9385394102835742,
0.8514668606306861, 0.7963080420618799, 0.7821837347057755, 0.6934922845005662,
0.4874158407861593, 0.7460209253417306, 0.8485735723282192, 0.19422768656460254,
0.37968523131138177, 0.3744030003276628, 0.39791919808148235, 0.734885370411839,
0.9539931932132405, 0.04008308015375839, 0.6785169657462848]
| 57.586207
| 89
| 0.794012
|
e8cefa5cd7eadb5da14e33b22ae662ba6a52ea9b
| 2,230
|
py
|
Python
|
utils/build_swift/tests/test_migration.py
|
LynnKirby/swift
|
11a8be196751f8f7c194bea83b5f4f8e6a7d5360
|
[
"Apache-2.0"
] | 10
|
2019-05-11T02:17:28.000Z
|
2022-02-06T15:37:53.000Z
|
utils/build_swift/tests/test_migration.py
|
LynnKirby/swift
|
11a8be196751f8f7c194bea83b5f4f8e6a7d5360
|
[
"Apache-2.0"
] | 3
|
2018-09-27T23:09:06.000Z
|
2018-10-18T18:12:20.000Z
|
utils/build_swift/tests/test_migration.py
|
LynnKirby/swift
|
11a8be196751f8f7c194bea83b5f4f8e6a7d5360
|
[
"Apache-2.0"
] | 3
|
2017-08-28T13:45:21.000Z
|
2018-06-28T10:53:37.000Z
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
from .utils import TestCase, add_metaclass
from .. import migration
def _get_sdk_targets(sdk_names):
targets = []
for sdk_name in sdk_names:
targets += migration._SDK_TARGETS[sdk_name]
return targets
def _get_sdk_target_names(sdk_names):
return [target.name for target in _get_sdk_targets(sdk_names)]
# -----------------------------------------------------------------------------
class TestMigrateSwiftSDKsMeta(type):
"""Metaclass used to dynamically generate test methods.
"""
def __new__(cls, name, bases, attrs):
# Generate tests for migrating each Swift SDK
for sdk_name in migration._SDK_TARGETS.keys():
test_name = 'test_migrate_swift_sdk_' + sdk_name
attrs[test_name] = cls.generate_migrate_swift_sdks_test(sdk_name)
return super(TestMigrateSwiftSDKsMeta, cls).__new__(
cls, name, bases, attrs)
@classmethod
def generate_migrate_swift_sdks_test(cls, sdk_name):
def test(self):
args = ['--swift-sdks={}'.format(sdk_name)]
args = migration.migrate_swift_sdks(args)
target_names = _get_sdk_target_names([sdk_name])
self.assertListEqual(args, [
'--stdlib-deployment-targets={}'.format(' '.join(target_names))
])
return test
@add_metaclass(TestMigrateSwiftSDKsMeta)
class TestMigrateSwiftSDKs(TestCase):
def test_multiple_swift_sdk_flags(self):
args = [
'--swift-sdks=OSX',
'--swift-sdks=OSX;IOS;IOS_SIMULATOR'
]
args = migration.migrate_swift_sdks(args)
target_names = _get_sdk_target_names(['OSX', 'IOS', 'IOS_SIMULATOR'])
self.assertListEqual(args, [
'--stdlib-deployment-targets={}'.format(' '.join(target_names))
])
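# --- Hedged aside (illustration only, not part of the original test file) ---
# A self-contained toy showing the same "metaclass generates one test method
# per input" pattern used by TestMigrateSwiftSDKsMeta above. The inputs and
# the class names below are inventions for illustration.
class _ToyParametrizedMeta(type):
    def __new__(cls, name, bases, attrs):
        for value in (1, 2, 3):
            attrs['test_square_of_{}'.format(value)] = cls._make_test(value)
        return super(_ToyParametrizedMeta, cls).__new__(cls, name, bases, attrs)
    @staticmethod
    def _make_test(value):
        def test(self):
            self.assertEqual(value ** 2, value * value)
        return test
@add_metaclass(_ToyParametrizedMeta)
class _ToyParametrizedTests(TestCase):
    """Ends up with test_square_of_1, test_square_of_2 and test_square_of_3."""
    pass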
| 30.972222
| 79
| 0.652915
|
6fcf20e4bbbaec8b0e32841dee7faccdedb09a2a
| 1,804
|
py
|
Python
|
monk/tf_keras_1/models/params.py
|
gstearmit/monk_v1
|
89184ae27dc6d134620034d5b12aa86473ea47ba
|
[
"Apache-2.0"
] | null | null | null |
monk/tf_keras_1/models/params.py
|
gstearmit/monk_v1
|
89184ae27dc6d134620034d5b12aa86473ea47ba
|
[
"Apache-2.0"
] | null | null | null |
monk/tf_keras_1/models/params.py
|
gstearmit/monk_v1
|
89184ae27dc6d134620034d5b12aa86473ea47ba
|
[
"Apache-2.0"
] | 1
|
2020-10-07T12:57:44.000Z
|
2020-10-07T12:57:44.000Z
|
from tf_keras_1.models.imports import *
from system.imports import *
from tf_keras_1.models.models import combined_list_lower
@accepts(str, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_model_name(name, system_dict):
if(name not in combined_list_lower):
msg = "Model name {} not in {}".format(name, combined_list_lower);
raise ConstraintError(msg);
system_dict["model"]["params"]["model_name"] = name;
return system_dict;
@accepts(float, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_gpu_memory_fraction(value, system_dict):
system_dict["model"]["params"]["gpu_memory_fraction"] = value;
return system_dict;
@accepts(bool, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_device(value, system_dict):
GPUs = GPUtil.getGPUs()
if(value and len(GPUs)==0):
msg = "GPU not accessible yet requested."
ConstraintWarning(msg)
system_dict["model"]["params"]["use_gpu"] = False;
else:
system_dict["model"]["params"]["use_gpu"] = value;
return system_dict;
@accepts(bool, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_pretrained(value, system_dict):
system_dict["model"]["params"]["use_pretrained"] = value;
return system_dict;
@accepts(bool, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_freeze_base_network(value, system_dict):
system_dict["model"]["params"]["freeze_base_network"] = value;
return system_dict;
@accepts([str, list], dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_model_path(path, system_dict):
system_dict["model"]["params"]["model_path"] = path;
return system_dict;
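# --- Hedged usage sketch (illustration only, not part of the original file) --
# Shows how these setters thread a shared ``system_dict`` through the
# configuration steps. The skeleton dict below is an assumption about the
# minimal structure the setters touch; the real dict built by Monk contains
# many more keys, and the weights path is a placeholder.
def _example_param_flow():
    system_dict = {"model": {"params": {}}}
    system_dict = set_device(False, system_dict)             # CPU only
    system_dict = set_pretrained(True, system_dict)
    system_dict = set_freeze_base_network(True, system_dict)
    system_dict = set_model_path("weights/best_model.h5", system_dict)
    return system_dict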
| 31.649123
| 74
| 0.722838
|
e952ab888ebcba10a3837f4750ebbe8825c277c7
| 625
|
py
|
Python
|
example_project_app/urls.py
|
bihealth/sodar_core
|
a6c22c4f276b64ffae6de48779a82d59a60a9333
|
[
"MIT"
] | 11
|
2019-06-26T14:05:58.000Z
|
2020-12-05T02:20:11.000Z
|
example_project_app/urls.py
|
bihealth/sodar_core
|
a6c22c4f276b64ffae6de48779a82d59a60a9333
|
[
"MIT"
] | 11
|
2019-07-01T06:17:44.000Z
|
2021-04-20T07:19:40.000Z
|
example_project_app/urls.py
|
bihealth/sodar-core
|
ac7397294b24aea352bc7842e75011ad8c1a2033
|
[
"MIT"
] | 4
|
2019-06-26T07:49:50.000Z
|
2020-05-19T21:58:10.000Z
|
from django.conf.urls import url
from example_project_app import views, views_api
app_name = 'example_project_app'
# NOTE: The kwarg name for an object which is a Project, or which has
# "project" as a member, is expected to correspond 1:1 to the model in
# question (lowercase is OK)!
urls = [
url(
regex=r'^(?P<project>[0-9a-f-]+)$',
view=views.ExampleView.as_view(),
name='example',
)
]
urls_api = [
url(
regex=r'^api/hello/(?P<project>[0-9a-f-]+)$',
view=views_api.HelloExampleProjectAPIView.as_view(),
name='example_api_hello',
)
]
urlpatterns = urls + urls_api
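# --- Hedged usage sketch (not part of the original module) ------------------
# Illustrates how one of the names above could be reversed from view code,
# assuming the app is included under its ``app_name`` namespace and that the
# project object exposes a UUID attribute (``sodar_uuid`` here is an
# assumption taken from SODAR Core conventions, not from this file).
def _example_reverse_url(project):
    from django.urls import reverse
    return reverse(
        'example_project_app:example', kwargs={'project': project.sodar_uuid}
    )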
| 23.148148
| 79
| 0.6416
|
754c4176c660100df442eca420f70ba2ac51fa4c
| 9,873
|
py
|
Python
|
pkgs/sdk-pkg/src/genie/libs/sdk/apis/apic/firmware/execute.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/sdk-pkg/src/genie/libs/sdk/apis/apic/firmware/execute.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/sdk-pkg/src/genie/libs/sdk/apis/apic/firmware/execute.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
""" Execute type APIs for APIC """
import time
import logging
from genie.utils.timeout import Timeout
log = logging.getLogger(__name__)
def execute_clear_firmware_repository(device, sleep_after_delete=5):
""" Clears the firmware repository.
Args:
device (obj): Device to execute on
sleep_after_delete (int, optional): Time in seconds to sleep
after clearing the firmware repository. Defaults to 5.
Returns:
True if firmware repository is emptied
False if firmware repository cannot be emptied
Raises:
N/A
"""
images = device.api.get_firmware_repository_images()
if not images:
return True
for image in images:
device.execute('firmware repository delete {}'.format(image))
log.info('Sleeping for {} seconds to ensure the repository updates'
.format(sleep_after_delete))
time.sleep(sleep_after_delete)
if device.api.get_firmware_repository_images():
return False
else:
return True
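# --- Hedged usage sketch (not part of the original module) ------------------
# Shows how execute_clear_firmware_repository might be called from a pyATS
# job. The testbed file name and the device name are placeholders/assumptions.
def _example_clear_repository():
    from genie.testbed import load
    testbed = load('testbed.yaml')            # hypothetical testbed file
    apic = testbed.devices['apic1']           # hypothetical device name
    apic.connect()
    if execute_clear_firmware_repository(apic, sleep_after_delete=10):
        log.info('Firmware repository is now empty')
    else:
        log.warning('Some images could not be removed from the repository')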
def execute_install_controller_group_firmware(
device,
controller_image,
error_patterns=None,
controller_upgrade_max_time=1800,
controller_upgrade_check_interval=60,
controller_reconnect_max_time=900,
controller_reconnect_check_interval=60,
controller_upgrade_after_reconnect_max_time=300,
controller_upgrade_after_reconnect_check_interval=60
):
""" Installs the controller image onto the controller(s) and verifies install
completed.
Args:
device (obj): Device to execute on
controller_image (str): Image to install. This must exist in the
firmware repository.
error_patterns (list, optional): Any extra error patterns for executing
'firmware upgrade controller-group'. Defaults to None.
controller_upgrade_max_time (int, optional): Max time in seconds allowed
for verifying controller upgrade. Defaults to 1800.
controller_upgrade_check_interval (int, optional): How often in seconds
to check upgrade status. Defaults to 60.
        controller_reconnect_max_time (int, optional): Max time in seconds allowed
for reconnecting to controller if the connection is lost. Defaults
to 900.
controller_reconnect_check_interval (int, optional): How often in
seconds to attempt reconnect. Defaults to 60.
controller_upgrade_after_reconnect_max_time (int, optional): Max time
in seconds allowed for verifying controller upgrade after reconnect.
Defaults to 300.
controller_upgrade_after_reconnect_check_interval (int, optional): How
often in seconds to check upgrade status after reconnect. Defaults
to 60.
Returns:
True if install succeeds
False if install failed
Raises:
N/A
"""
errors = [r".*Command execution failed.*"]
if error_patterns:
errors.extend(error_patterns)
device.configure(['firmware',
'controller-group',
'firmware-version {}'.format(controller_image)])
try:
device.execute('firmware upgrade controller-group',
error_pattern=errors)
except Exception as e:
log.error("Firmware upgrade command failed: {}".format(str(e)))
return False
try:
result = device.api.verify_firmware_upgrade_status(
status='success',
firmware_group='controller-group',
max_time=controller_upgrade_max_time,
check_interval=controller_upgrade_check_interval)
except Exception as e:
# An exception is expected to be raised when upgrading a
# controller-group with only one controller as the device
# will drop the connection during the upgrade process
log.info("Reattempting connection in-case there was only "
"one controller. Error message: {}".format(str(e)))
# Attempt device reconnection
timeout = Timeout(controller_reconnect_max_time,
controller_reconnect_check_interval)
while timeout.iterate():
timeout.sleep()
device.destroy()
try:
device.connect(learn_hostname=True)
except Exception:
log.info("Cannot connect to {dev}".format(
dev=device.hostname))
else:
break
# If not connected after timeout, fail
if not device.connected:
return False
# Reconnected, so check firmware upgrade status
result = device.api.verify_firmware_upgrade_status(
status='success',
firmware_group='controller-group',
max_time=controller_upgrade_after_reconnect_max_time,
check_interval=controller_upgrade_after_reconnect_check_interval)
return result
def execute_install_switch_group_firmware(
device,
switch_image,
switch_node_ids,
switch_group_name='switches',
clear_switch_group=True,
error_patterns=None,
switch_upgrade_max_time=2700,
switch_upgrade_check_interval=60,
stabilize_switch_group_config_sleep=120,
controller_reconnect_max_time=900,
controller_reconnect_check_interval=60,
):
""" Installs the switch image on the switch(s) and then verifies the install
completed.
Args:
device (obj): Device to execute on
switch_image (str): Image to install. This must exist in the
firmware repository.
switch_node_ids (str): String of node IDs to install the image on. The
node IDs must be separated by a comma.
switch_group_name (str, optional): Name for the switch-group that will
be configured. Defaults to switches.
clear_switch_group (bool, optional): Whether or not to clear the
switch-group configuration before applying new configuration.
Defaults to True.
error_patterns (list, optional): Any extra error patterns for executing
'firmware upgrade switch-group {name}'. Defaults to None.
switch_upgrade_max_time (int, optional): Max time in seconds allowed for
verifying upgrade status. Defaults to 2700.
switch_upgrade_check_interval (int, optional): How often in seconds to
check upgrade status. Defaults to 60.
stabilize_switch_group_config_sleep (int, optional): How long in seconds
to sleep after configuring switch-group. Defaults to 120.
        controller_reconnect_max_time (int, optional): Max time in seconds allowed
for reconnecting to controller if the connection is lost. Defaults
to 900.
controller_reconnect_check_interval (int, optional): How often in
seconds to attempt reconnect. Defaults to 60.
Returns:
True if install succeeds
False if install failed
Raises:
N/A
"""
errors = [r".*Command execution failed.*"]
if error_patterns:
errors.extend(error_patterns)
if clear_switch_group:
log.info("Clearing switch-group configuration because the "
"argument 'clear_switch_group' is True")
device.configure(['firmware',
'no switch-group {}'.format(switch_group_name)])
device.configure(['firmware',
'switch-group {}'.format(switch_group_name),
'switch {}'.format(switch_node_ids),
'firmware-version {}'.format(switch_image)])
log.info("Sleeping for '{}' seconds to allow the newly configured switch-group "
"to stabilize".format(stabilize_switch_group_config_sleep))
time.sleep(stabilize_switch_group_config_sleep)
try:
device.execute(
'firmware upgrade switch-group {}'.format(switch_group_name),
            error_pattern=errors
)
except Exception as e:
log.error("Firmware upgrade command failed: {}".format(str(e)))
return False
try:
result = device.api.verify_firmware_upgrade_status(
status='success',
firmware_group='switch-group {}'.format(switch_group_name),
max_time=switch_upgrade_max_time,
check_interval=switch_upgrade_check_interval)
except Exception as e:
# An exception can be raised if the controller does not respond to
# the 'show firmware upgrade status' command. Disconnect and reconnect
# to ensure the controller is ready for commands to be issued.
log.info("Reattempting connection as the device '{dev}' returned "
"nothing after executing a command.\nError: {e}".
format(dev=device.hostname, e=str(e)))
# Attempt device reconnection
timeout = Timeout(controller_reconnect_max_time,
controller_reconnect_check_interval)
while timeout.iterate():
timeout.sleep()
device.destroy()
try:
device.connect(learn_hostname=True)
except Exception:
log.info("Cannot connect to {dev}".format(
dev=device.hostname))
else:
break
# If not connected after timeout, fail
if not device.connected:
return False
# Reconnected, so check firmware upgrade status
result = device.api.verify_firmware_upgrade_status(
status='success',
firmware_group='switch-group {}'.format(switch_group_name),
max_time=switch_upgrade_max_time,
check_interval=switch_upgrade_check_interval)
return result
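# --- Hedged usage sketch (not part of the original module) ------------------
# A combined controller + switch upgrade flow using the two APIs above. The
# image names, node IDs and switch-group name are placeholders, not values
# taken from this file.
def _example_full_upgrade(device):
    controller_ok = execute_install_controller_group_firmware(
        device, controller_image='controller-image-from-repo')
    if not controller_ok:
        log.error('Controller upgrade failed')
        return False
    switch_ok = execute_install_switch_group_firmware(
        device,
        switch_image='switch-image-from-repo',
        switch_node_ids='101,102',
        switch_group_name='leafs')
    if not switch_ok:
        log.error('Switch upgrade failed')
    return controller_ok and switch_ok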
| 35.260714
| 84
| 0.648334
|
8437193c2737d68ca770d5fd5abc843edfed46e1
| 91,111
|
py
|
Python
|
sympy/integrals/transforms.py
|
amitport/sympy
|
d62e689e4f2944939181c89d4b9e99c6b8e2ffa9
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/integrals/transforms.py
|
amitport/sympy
|
d62e689e4f2944939181c89d4b9e99c6b8e2ffa9
|
[
"BSD-3-Clause"
] | 3
|
2022-02-04T14:45:16.000Z
|
2022-02-04T14:45:45.000Z
|
sympy/integrals/transforms.py
|
mlliarm/sympy
|
3b96442d2d72d3fe3243788372898898bad72280
|
[
"BSD-3-Clause"
] | null | null | null |
""" Integral Transforms """
from functools import reduce, wraps
from itertools import repeat
from sympy.core import S, pi, I
from sympy.core.add import Add
from sympy.core.function import (AppliedUndef, count_ops, Derivative, expand,
expand_complex, expand_mul, Function, Lambda,
WildFunction)
from sympy.core.mul import Mul
from sympy.core.numbers import igcd, ilcm
from sympy.core.relational import _canonical, Ge, Gt, Lt, Unequality, Eq
from sympy.core.sorting import default_sort_key, ordered
from sympy.core.symbol import Dummy, symbols, Wild
from sympy.core.traversal import postorder_traversal
from sympy.functions.combinatorial.factorials import factorial, rf
from sympy.functions.elementary.complexes import (re, arg, Abs, polar_lift,
periodic_argument)
from sympy.functions.elementary.exponential import exp, log, exp_polar
from sympy.functions.elementary.hyperbolic import cosh, coth, sinh, tanh, asinh
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.miscellaneous import Max, Min, sqrt
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.elementary.trigonometric import cos, cot, sin, tan, atan
from sympy.functions.special.bessel import besseli, besselj, besselk, bessely
from sympy.functions.special.delta_functions import DiracDelta, Heaviside
from sympy.functions.special.error_functions import erf, erfc, Ei
from sympy.functions.special.gamma_functions import digamma, gamma, lowergamma
from sympy.functions.special.hyper import meijerg
from sympy.integrals import integrate, Integral
from sympy.integrals.meijerint import _dummy
from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And
from sympy.matrices.matrices import MatrixBase
from sympy.polys.matrices.linsolve import _lin_eq2dict, PolyNonlinearError
from sympy.polys.polyroots import roots
from sympy.polys.polytools import factor, Poly
from sympy.polys.rationaltools import together
from sympy.polys.rootoftools import CRootOf, RootSum
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import debug
##########################################################################
# Helpers / Utilities
##########################################################################
class IntegralTransformError(NotImplementedError):
"""
Exception raised in relation to problems computing transforms.
Explanation
===========
This class is mostly used internally; if integrals cannot be computed
objects representing unevaluated transforms are usually returned.
The hint ``needeval=True`` can be used to disable returning transform
objects, and instead raise this exception if an integral cannot be
computed.
"""
def __init__(self, transform, function, msg):
super().__init__(
"%s Transform could not be computed: %s." % (transform, msg))
self.function = function
class IntegralTransform(Function):
"""
Base class for integral transforms.
Explanation
===========
This class represents unevaluated transforms.
To implement a concrete transform, derive from this class and implement
the ``_compute_transform(f, x, s, **hints)`` and ``_as_integral(f, x, s)``
functions. If the transform cannot be computed, raise :obj:`IntegralTransformError`.
Also set ``cls._name``. For instance,
>>> from sympy import LaplaceTransform
>>> LaplaceTransform._name
'Laplace'
Implement ``self._collapse_extra`` if your function returns more than just a
number and possibly a convergence condition.
"""
@property
def function(self):
""" The function to be transformed. """
return self.args[0]
@property
def function_variable(self):
""" The dependent variable of the function to be transformed. """
return self.args[1]
@property
def transform_variable(self):
""" The independent transform variable. """
return self.args[2]
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the transform
is evaluated.
"""
return self.function.free_symbols.union({self.transform_variable}) \
- {self.function_variable}
def _compute_transform(self, f, x, s, **hints):
raise NotImplementedError
def _as_integral(self, f, x, s):
raise NotImplementedError
def _collapse_extra(self, extra):
cond = And(*extra)
if cond == False:
raise IntegralTransformError(self.__class__.name, None, '')
return cond
def _try_directly(self, **hints):
T = None
try_directly = not any(func.has(self.function_variable)
for func in self.function.atoms(AppliedUndef))
if try_directly:
try:
T = self._compute_transform(self.function,
self.function_variable, self.transform_variable, **hints)
except IntegralTransformError:
T = None
fn = self.function
if not fn.is_Add:
fn = expand_mul(fn)
return fn, T
def doit(self, **hints):
"""
Try to evaluate the transform in closed form.
Explanation
===========
This general function handles linearity, but apart from that leaves
pretty much everything to _compute_transform.
Standard hints are the following:
- ``simplify``: whether or not to simplify the result
- ``noconds``: if True, do not return convergence conditions
- ``needeval``: if True, raise IntegralTransformError instead of
returning IntegralTransform objects
The default values of these hints depend on the concrete transform,
usually the default is
``(simplify, noconds, needeval) = (True, False, False)``.
"""
needeval = hints.pop('needeval', False)
simplify = hints.pop('simplify', True)
hints['simplify'] = simplify
fn, T = self._try_directly(**hints)
if T is not None:
return T
if fn.is_Add:
hints['needeval'] = needeval
res = [self.__class__(*([x] + list(self.args[1:]))).doit(**hints)
for x in fn.args]
extra = []
ress = []
for x in res:
if not isinstance(x, tuple):
x = [x]
ress.append(x[0])
if len(x) == 2:
# only a condition
extra.append(x[1])
elif len(x) > 2:
# some region parameters and a condition (Mellin, Laplace)
extra += [x[1:]]
if simplify==True:
res = Add(*ress).simplify()
else:
res = Add(*ress)
if not extra:
return res
try:
extra = self._collapse_extra(extra)
if iterable(extra):
return tuple([res]) + tuple(extra)
else:
return (res, extra)
except IntegralTransformError:
pass
if needeval:
raise IntegralTransformError(
self.__class__._name, self.function, 'needeval')
# TODO handle derivatives etc
# pull out constant coefficients
coeff, rest = fn.as_coeff_mul(self.function_variable)
return coeff*self.__class__(*([Mul(*rest)] + list(self.args[1:])))
@property
def as_integral(self):
return self._as_integral(self.function, self.function_variable,
self.transform_variable)
def _eval_rewrite_as_Integral(self, *args, **kwargs):
return self.as_integral
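# --- Hedged illustration (not part of the original module) ------------------
# A toy subclass following the recipe in the IntegralTransform docstring:
# implement _compute_transform and _as_integral and set ``_name``. The
# "transform" here is deliberately trivial and exists only to show the hooks.
class _ToyScaleTransform(IntegralTransform):
    """Toy 'transform' G(s) = Integral(f(x), (x, 0, s)), for illustration only."""
    _name = 'ToyScale'
    def _compute_transform(self, f, x, s, **hints):
        result = integrate(f, (x, S.Zero, s))
        if result.has(Integral):
            raise IntegralTransformError(self._name, f, 'could not compute integral')
        return result
    def _as_integral(self, f, x, s):
        return Integral(f, (x, S.Zero, s))
# With x, s = symbols('x s'):
#   _ToyScaleTransform(exp(-x), x, s).doit()  ->  1 - exp(-s)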
def _simplify(expr, doit):
if doit:
from sympy.simplify import simplify
from sympy.simplify.powsimp import powdenest
return simplify(powdenest(piecewise_fold(expr), polar=True))
return expr
def _noconds_(default):
"""
This is a decorator generator for dropping convergence conditions.
Explanation
===========
Suppose you define a function ``transform(*args)`` which returns a tuple of
the form ``(result, cond1, cond2, ...)``.
Decorating it ``@_noconds_(default)`` will add a new keyword argument
``noconds`` to it. If ``noconds=True``, the return value will be altered to
be only ``result``, whereas if ``noconds=False`` the return value will not
be altered.
The default value of the ``noconds`` keyword will be ``default`` (i.e. the
argument of this function).
"""
def make_wrapper(func):
@wraps(func)
def wrapper(*args, noconds=default, **kwargs):
res = func(*args, **kwargs)
if noconds:
return res[0]
return res
return wrapper
return make_wrapper
_noconds = _noconds_(False)
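# --- Hedged illustration (not part of the original module) ------------------
# A throwaway function decorated with ``_noconds_`` to make the injected
# ``noconds`` keyword visible; ``_toy_backend_example`` is an invention.
@_noconds_(True)
def _toy_backend_example(x):
    # Pretend (result, condition) pair, like the transform backends return.
    return (x**2, True)
# _toy_backend_example(3)                 -> 9
# _toy_backend_example(3, noconds=False)  -> (9, True)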
##########################################################################
# Mellin Transform
##########################################################################
def _default_integrator(f, x):
return integrate(f, (x, S.Zero, S.Infinity))
@_noconds
def _mellin_transform(f, x, s_, integrator=_default_integrator, simplify=True):
""" Backend function to compute Mellin transforms. """
# We use a fresh dummy, because assumptions on s might drop conditions on
# convergence of the integral.
s = _dummy('s', 'mellin-transform', f)
F = integrator(x**(s - 1) * f, x)
if not F.has(Integral):
return _simplify(F.subs(s, s_), simplify), (S.NegativeInfinity, S.Infinity), S.true
if not F.is_Piecewise: # XXX can this work if integration gives continuous result now?
raise IntegralTransformError('Mellin', f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(
'Mellin', f, 'integral in unexpected form')
def process_conds(cond):
"""
Turn ``cond`` into a strip (a, b), and auxiliary conditions.
"""
from sympy.solvers.inequalities import _solve_inequality
a = S.NegativeInfinity
b = S.Infinity
aux = S.true
conds = conjuncts(to_cnf(cond))
t = Dummy('t', real=True)
for c in conds:
a_ = S.Infinity
b_ = S.NegativeInfinity
aux_ = []
for d in disjuncts(c):
d_ = d.replace(
re, lambda x: x.as_real_imag()[0]).subs(re(s), t)
if not d.is_Relational or \
d.rel_op in ('==', '!=') \
or d_.has(s) or not d_.has(t):
aux_ += [d]
continue
soln = _solve_inequality(d_, t)
if not soln.is_Relational or \
soln.rel_op in ('==', '!='):
aux_ += [d]
continue
if soln.lts == t:
b_ = Max(soln.gts, b_)
else:
a_ = Min(soln.lts, a_)
if a_ is not S.Infinity and a_ != b:
a = Max(a_, a)
elif b_ is not S.NegativeInfinity and b_ != a:
b = Min(b_, b)
else:
aux = And(aux, Or(*aux_))
return a, b, aux
conds = [process_conds(c) for c in disjuncts(cond)]
conds = [x for x in conds if x[2] != False]
conds.sort(key=lambda x: (x[0] - x[1], count_ops(x[2])))
if not conds:
raise IntegralTransformError('Mellin', f, 'no convergence found')
a, b, aux = conds[0]
return _simplify(F.subs(s, s_), simplify), (a, b), aux
class MellinTransform(IntegralTransform):
"""
Class representing unevaluated Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Mellin transforms, see the :func:`mellin_transform`
docstring.
"""
_name = 'Mellin'
def _compute_transform(self, f, x, s, **hints):
return _mellin_transform(f, x, s, **hints)
def _as_integral(self, f, x, s):
return Integral(f*x**(s - 1), (x, S.Zero, S.Infinity))
def _collapse_extra(self, extra):
a = []
b = []
cond = []
for (sa, sb), c in extra:
a += [sa]
b += [sb]
cond += [c]
res = (Max(*a), Min(*b)), And(*cond)
if (res[0][0] >= res[0][1]) == True or res[1] == False:
raise IntegralTransformError(
'Mellin', None, 'no combined convergence.')
return res
def mellin_transform(f, x, s, **hints):
r"""
Compute the Mellin transform `F(s)` of `f(x)`,
.. math :: F(s) = \int_0^\infty x^{s-1} f(x) \mathrm{d}x.
For all "sensible" functions, this converges absolutely in a strip
`a < \operatorname{Re}(s) < b`.
Explanation
===========
The Mellin transform is related via change of variables to the Fourier
transform, and also to the (bilateral) Laplace transform.
This function returns ``(F, (a, b), cond)``
where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip
(as above), and ``cond`` are auxiliary convergence conditions.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`MellinTransform` object.
For a description of possible hints, refer to the docstring of
    :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
then only `F` will be returned (i.e. not ``cond``, and also not the strip
``(a, b)``).
Examples
========
>>> from sympy import mellin_transform, exp
>>> from sympy.abc import x, s
>>> mellin_transform(exp(-x), x, s)
(gamma(s), (0, oo), True)
See Also
========
inverse_mellin_transform, laplace_transform, fourier_transform
hankel_transform, inverse_hankel_transform
"""
return MellinTransform(f, x, s).doit(**hints)
def _rewrite_sin(m_n, s, a, b):
"""
Re-write the sine function ``sin(m*s + n)`` as gamma functions, compatible
with the strip (a, b).
Return ``(gamma1, gamma2, fac)`` so that ``f == fac/(gamma1 * gamma2)``.
Examples
========
>>> from sympy.integrals.transforms import _rewrite_sin
>>> from sympy import pi, S
>>> from sympy.abc import s
>>> _rewrite_sin((pi, 0), s, 0, 1)
(gamma(s), gamma(1 - s), pi)
>>> _rewrite_sin((pi, 0), s, 1, 0)
(gamma(s - 1), gamma(2 - s), -pi)
>>> _rewrite_sin((pi, 0), s, -1, 0)
(gamma(s + 1), gamma(-s), -pi)
>>> _rewrite_sin((pi, pi/2), s, S(1)/2, S(3)/2)
(gamma(s - 1/2), gamma(3/2 - s), -pi)
>>> _rewrite_sin((pi, pi), s, 0, 1)
(gamma(s), gamma(1 - s), -pi)
>>> _rewrite_sin((2*pi, 0), s, 0, S(1)/2)
(gamma(2*s), gamma(1 - 2*s), pi)
>>> _rewrite_sin((2*pi, 0), s, S(1)/2, 1)
(gamma(2*s - 1), gamma(2 - 2*s), -pi)
"""
# (This is a separate function because it is moderately complicated,
# and I want to doctest it.)
# We want to use pi/sin(pi*x) = gamma(x)*gamma(1-x).
    # But there is one complication: the gamma functions determine the
    # integration contour in the definition of the G-function. Usually
# it would not matter if this is slightly shifted, unless this way
# we create an undefined function!
# So we try to write this in such a way that the gammas are
# eminently on the right side of the strip.
m, n = m_n
m = expand_mul(m/pi)
n = expand_mul(n/pi)
r = ceiling(-m*a - n.as_real_imag()[0]) # Don't use re(n), does not expand
return gamma(m*s + n + r), gamma(1 - n - r - m*s), (-1)**r*pi
class MellinTransformStripError(ValueError):
"""
Exception raised by _rewrite_gamma. Mainly for internal use.
"""
pass
def _rewrite_gamma(f, s, a, b):
"""
Try to rewrite the product f(s) as a product of gamma functions,
so that the inverse Mellin transform of f can be expressed as a meijer
G function.
Explanation
===========
Return (an, ap), (bm, bq), arg, exp, fac such that
G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform of f(s).
Raises IntegralTransformError or MellinTransformStripError on failure.
It is asserted that f has no poles in the fundamental strip designated by
(a, b). One of a and b is allowed to be None. The fundamental strip is
important, because it determines the inversion contour.
This function can handle exponentials, linear factors, trigonometric
functions.
This is a helper function for inverse_mellin_transform that will not
attempt any transformations on f.
Examples
========
>>> from sympy.integrals.transforms import _rewrite_gamma
>>> from sympy.abc import s
>>> from sympy import oo
>>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo)
(([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1)
>>> _rewrite_gamma((s-1)**2, s, -oo, oo)
(([], [1, 1]), ([2, 2], []), 1, 1, 1)
Importance of the fundamental strip:
>>> _rewrite_gamma(1/s, s, 0, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, None, oo)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, 0, None)
(([1], []), ([], [0]), 1, 1, 1)
>>> _rewrite_gamma(1/s, s, -oo, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, None, 0)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(1/s, s, -oo, None)
(([], [1]), ([0], []), 1, 1, -1)
>>> _rewrite_gamma(2**(-s+3), s, -oo, oo)
(([], []), ([], []), 1/2, 1, 8)
"""
# Our strategy will be as follows:
# 1) Guess a constant c such that the inversion integral should be
# performed wrt s'=c*s (instead of plain s). Write s for s'.
# 2) Process all factors, rewrite them independently as gamma functions in
# argument s, or exponentials of s.
# 3) Try to transform all gamma functions s.t. they have argument
# a+s or a-s.
# 4) Check that the resulting G function parameters are valid.
# 5) Combine all the exponentials.
a_, b_ = S([a, b])
def left(c, is_numer):
"""
Decide whether pole at c lies to the left of the fundamental strip.
"""
# heuristically, this is the best chance for us to solve the inequalities
c = expand(re(c))
if a_ is None and b_ is S.Infinity:
return True
if a_ is None:
return c < b_
if b_ is None:
return c <= a_
if (c >= b_) == True:
return False
if (c <= a_) == True:
return True
if is_numer:
return None
if a_.free_symbols or b_.free_symbols or c.free_symbols:
return None # XXX
#raise IntegralTransformError('Inverse Mellin', f,
# 'Could not determine position of singularity %s'
# ' relative to fundamental strip' % c)
raise MellinTransformStripError('Pole inside critical strip?')
# 1)
s_multipliers = []
for g in f.atoms(gamma):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff]
for g in f.atoms(sin, cos, tan, cot):
if not g.has(s):
continue
arg = g.args[0]
if arg.is_Add:
arg = arg.as_independent(s)[1]
coeff, _ = arg.as_coeff_mul(s)
s_multipliers += [coeff/pi]
s_multipliers = [Abs(x) if x.is_extended_real else x for x in s_multipliers]
common_coefficient = S.One
for x in s_multipliers:
if not x.is_Rational:
common_coefficient = x
break
s_multipliers = [x/common_coefficient for x in s_multipliers]
if not (all(x.is_Rational for x in s_multipliers) and
common_coefficient.is_extended_real):
raise IntegralTransformError("Gamma", None, "Nonrational multiplier")
s_multiplier = common_coefficient/reduce(ilcm, [S(x.q)
for x in s_multipliers], S.One)
if s_multiplier == common_coefficient:
if len(s_multipliers) == 0:
s_multiplier = common_coefficient
else:
s_multiplier = common_coefficient \
*reduce(igcd, [S(x.p) for x in s_multipliers])
f = f.subs(s, s/s_multiplier)
fac = S.One/s_multiplier
exponent = S.One/s_multiplier
if a_ is not None:
a_ *= s_multiplier
if b_ is not None:
b_ *= s_multiplier
# 2)
numer, denom = f.as_numer_denom()
numer = Mul.make_args(numer)
denom = Mul.make_args(denom)
args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False)))
facs = []
dfacs = []
# *_gammas will contain pairs (a, c) representing Gamma(a*s + c)
numer_gammas = []
denom_gammas = []
# exponentials will contain bases for exponentials of s
exponentials = []
def exception(fact):
return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact)
while args:
fact, is_numer = args.pop()
if is_numer:
ugammas, lgammas = numer_gammas, denom_gammas
ufacs = facs
else:
ugammas, lgammas = denom_gammas, numer_gammas
ufacs = dfacs
def linear_arg(arg):
""" Test if arg is of form a*s+b, raise exception if not. """
if not arg.is_polynomial(s):
raise exception(fact)
p = Poly(arg, s)
if p.degree() != 1:
raise exception(fact)
return p.all_coeffs()
# constants
if not fact.has(s):
ufacs += [fact]
# exponentials
elif fact.is_Pow or isinstance(fact, exp):
if fact.is_Pow:
base = fact.base
exp_ = fact.exp
else:
base = exp_polar(1)
exp_ = fact.exp
if exp_.is_Integer:
cond = is_numer
if exp_ < 0:
cond = not cond
args += [(base, cond)]*Abs(exp_)
continue
elif not base.has(s):
a, b = linear_arg(exp_)
if not is_numer:
base = 1/base
exponentials += [base**a]
facs += [base**b]
else:
raise exception(fact)
# linear factors
elif fact.is_polynomial(s):
p = Poly(fact, s)
if p.degree() != 1:
# We completely factor the poly. For this we need the roots.
# Now roots() only works in some cases (low degree), and CRootOf
# only works without parameters. So try both...
coeff = p.LT()[1]
rs = roots(p, s)
if len(rs) != p.degree():
rs = CRootOf.all_roots(p)
ufacs += [coeff]
args += [(s - c, is_numer) for c in rs]
continue
a, c = p.all_coeffs()
ufacs += [a]
c /= -a
# Now need to convert s - c
if left(c, is_numer):
ugammas += [(S.One, -c + 1)]
lgammas += [(S.One, -c)]
else:
ufacs += [-1]
ugammas += [(S.NegativeOne, c + 1)]
lgammas += [(S.NegativeOne, c)]
elif isinstance(fact, gamma):
a, b = linear_arg(fact.args[0])
if is_numer:
if (a > 0 and (left(-b/a, is_numer) == False)) or \
(a < 0 and (left(-b/a, is_numer) == True)):
raise NotImplementedError(
'Gammas partially over the strip.')
ugammas += [(a, b)]
elif isinstance(fact, sin):
# We try to re-write all trigs as gammas. This is not in
# general the best strategy, since sometimes this is impossible,
# but rewriting as exponentials would work. However trig functions
# in inverse mellin transforms usually all come from simplifying
# gamma terms, so this should work.
a = fact.args[0]
if is_numer:
# No problem with the poles.
gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi
else:
gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_)
args += [(gamma1, not is_numer), (gamma2, not is_numer)]
ufacs += [fac_]
elif isinstance(fact, tan):
a = fact.args[0]
args += [(sin(a, evaluate=False), is_numer),
(sin(pi/2 - a, evaluate=False), not is_numer)]
elif isinstance(fact, cos):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer)]
elif isinstance(fact, cot):
a = fact.args[0]
args += [(sin(pi/2 - a, evaluate=False), is_numer),
(sin(a, evaluate=False), not is_numer)]
else:
raise exception(fact)
fac *= Mul(*facs)/Mul(*dfacs)
# 3)
an, ap, bm, bq = [], [], [], []
for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True),
(denom_gammas, bq, ap, False)]:
while gammas:
a, c = gammas.pop()
if a != -1 and a != +1:
# We use the gamma function multiplication theorem.
p = Abs(S(a))
newa = a/p
newc = c/p
if not a.is_Integer:
raise TypeError("a is not an integer")
for k in range(p):
gammas += [(newa, newc + k/p)]
if is_numer:
fac *= (2*pi)**((1 - p)/2) * p**(c - S.Half)
exponentials += [p**a]
else:
fac /= (2*pi)**((1 - p)/2) * p**(c - S.Half)
exponentials += [p**(-a)]
continue
if a == +1:
plus.append(1 - c)
else:
minus.append(c)
# 4)
# TODO
# 5)
arg = Mul(*exponentials)
# for testability, sort the arguments
an.sort(key=default_sort_key)
ap.sort(key=default_sort_key)
bm.sort(key=default_sort_key)
bq.sort(key=default_sort_key)
return (an, ap), (bm, bq), arg, exponent, fac
@_noconds_(True)
def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False):
""" A helper for the real inverse_mellin_transform function, this one here
assumes x to be real and positive. """
x = _dummy('t', 'inverse-mellin-transform', F, positive=True)
# Actually, we won't try integration at all. Instead we use the definition
# of the Meijer G function as a fairly general inverse mellin transform.
F = F.rewrite(gamma)
for g in [factor(F), expand_mul(F), expand(F)]:
if g.is_Add:
# do all terms separately
ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg,
noconds=False)
for G in g.args]
conds = [p[1] for p in ress]
ress = [p[0] for p in ress]
res = Add(*ress)
if not as_meijerg:
res = factor(res, gens=res.atoms(Heaviside))
return res.subs(x, x_), And(*conds)
try:
a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1])
except IntegralTransformError:
continue
try:
G = meijerg(a, b, C/x**e)
except ValueError:
continue
if as_meijerg:
h = G
else:
try:
from sympy.simplify import hyperexpand
h = hyperexpand(G)
except NotImplementedError:
raise IntegralTransformError(
'Inverse Mellin', F, 'Could not calculate integral')
if h.is_Piecewise and len(h.args) == 3:
# XXX we break modularity here!
h = Heaviside(x - Abs(C))*h.args[0].args[0] \
+ Heaviside(Abs(C) - x)*h.args[1].args[0]
# We must ensure that the integral along the line we want converges,
# and return that value.
# See [L], 5.2
cond = [Abs(arg(G.argument)) < G.delta*pi]
# Note: we allow ">=" here, this corresponds to convergence if we let
# limits go to oo symmetrically. ">" corresponds to absolute convergence.
cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1),
Abs(arg(G.argument)) == G.delta*pi)]
cond = Or(*cond)
if cond == False:
raise IntegralTransformError(
'Inverse Mellin', F, 'does not converge')
return (h*fac).subs(x, x_), cond
raise IntegralTransformError('Inverse Mellin', F, '')
_allowed = None
class InverseMellinTransform(IntegralTransform):
"""
Class representing unevaluated inverse Mellin transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Mellin transforms, see the
:func:`inverse_mellin_transform` docstring.
"""
_name = 'Inverse Mellin'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, a, b, **opts):
if a is None:
a = InverseMellinTransform._none_sentinel
if b is None:
b = InverseMellinTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, a, b, **opts)
@property
def fundamental_strip(self):
a, b = self.args[3], self.args[4]
if a is InverseMellinTransform._none_sentinel:
a = None
if b is InverseMellinTransform._none_sentinel:
b = None
return a, b
def _compute_transform(self, F, s, x, **hints):
# IntegralTransform's doit will cause this hint to exist, but
# InverseMellinTransform should ignore it
hints.pop('simplify', True)
global _allowed
if _allowed is None:
_allowed = {
exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth,
factorial, rf}
for f in postorder_traversal(F):
if f.is_Function and f.has(s) and f.func not in _allowed:
raise IntegralTransformError('Inverse Mellin', F,
'Component %s not recognised.' % f)
strip = self.fundamental_strip
return _inverse_mellin_transform(F, s, x, strip, **hints)
def _as_integral(self, F, s, x):
c = self.__class__._c
return Integral(F*x**(-s), (s, c - S.ImaginaryUnit*S.Infinity, c +
S.ImaginaryUnit*S.Infinity))/(2*S.Pi*S.ImaginaryUnit)
def inverse_mellin_transform(F, s, x, strip, **hints):
r"""
Compute the inverse Mellin transform of `F(s)` over the fundamental
strip given by ``strip=(a, b)``.
Explanation
===========
This can be defined as
.. math:: f(x) = \frac{1}{2\pi i} \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s,
for any `c` in the fundamental strip. Under certain regularity
conditions on `F` and/or `f`,
this recovers `f` from its Mellin transform `F`
(and vice versa), for positive real `x`.
One of `a` or `b` may be passed as ``None``; a suitable `c` will be
inferred.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseMellinTransform` object.
Note that this function will assume x to be positive and real, regardless
of the SymPy assumptions!
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Examples
========
>>> from sympy import inverse_mellin_transform, oo, gamma
>>> from sympy.abc import x, s
>>> inverse_mellin_transform(gamma(s), s, x, (0, oo))
exp(-x)
The fundamental strip matters:
>>> f = 1/(s**2 - 1)
>>> inverse_mellin_transform(f, s, x, (-oo, -1))
x*(1 - 1/x**2)*Heaviside(x - 1)/2
>>> inverse_mellin_transform(f, s, x, (-1, 1))
-x*Heaviside(1 - x)/2 - Heaviside(x - 1)/(2*x)
>>> inverse_mellin_transform(f, s, x, (1, oo))
(1/2 - x**2/2)*Heaviside(1 - x)/x
See Also
========
mellin_transform
hankel_transform, inverse_hankel_transform
"""
return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints)
##########################################################################
# Laplace Transform
##########################################################################
def _simplifyconds(expr, s, a):
r"""
Naively simplify some conditions occurring in ``expr``, given that `\operatorname{Re}(s) > a`.
Examples
========
>>> from sympy.integrals.transforms import _simplifyconds as simp
>>> from sympy.abc import x
>>> from sympy import sympify as S
>>> simp(abs(x**2) < 1, x, 1)
False
>>> simp(abs(x**2) < 1, x, 2)
False
>>> simp(abs(x**2) < 1, x, 0)
Abs(x**2) < 1
>>> simp(abs(1/x**2) < 1, x, 1)
True
>>> simp(S(1) < abs(x), x, 1)
True
>>> simp(S(1) < abs(1/x), x, 1)
False
>>> from sympy import Ne
>>> simp(Ne(1, x**3), x, 1)
True
>>> simp(Ne(1, x**3), x, 2)
True
>>> simp(Ne(1, x**3), x, 0)
Ne(1, x**3)
"""
def power(ex):
if ex == s:
return 1
if ex.is_Pow and ex.base == s:
return ex.exp
return None
def bigger(ex1, ex2):
""" Return True only if |ex1| > |ex2|, False only if |ex1| < |ex2|.
Else return None. """
if ex1.has(s) and ex2.has(s):
return None
if isinstance(ex1, Abs):
ex1 = ex1.args[0]
if isinstance(ex2, Abs):
ex2 = ex2.args[0]
if ex1.has(s):
return bigger(1/ex2, 1/ex1)
n = power(ex2)
if n is None:
return None
try:
if n > 0 and (Abs(ex1) <= Abs(a)**n) == True:
return False
if n < 0 and (Abs(ex1) >= Abs(a)**n) == True:
return True
except TypeError:
pass
def replie(x, y):
""" simplify x < y """
if not (x.is_positive or isinstance(x, Abs)) \
or not (y.is_positive or isinstance(y, Abs)):
return (x < y)
r = bigger(x, y)
if r is not None:
return not r
return (x < y)
def replue(x, y):
b = bigger(x, y)
if b in (True, False):
return True
return Unequality(x, y)
def repl(ex, *args):
if ex in (True, False):
return bool(ex)
return ex.replace(*args)
from sympy.simplify.radsimp import collect_abs
expr = collect_abs(expr)
expr = repl(expr, Lt, replie)
expr = repl(expr, Gt, lambda x, y: replie(y, x))
expr = repl(expr, Unequality, replue)
return S(expr)
def expand_dirac_delta(expr):
"""
    Expand an expression involving DiracDelta to get it as a linear
combination of DiracDelta functions.
"""
return _lin_eq2dict(expr, expr.atoms(DiracDelta))
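# Added illustration (comment only, not in the original module): with
# x = Symbol('x') and t = Symbol('t'), an input such as
#   3*DiracDelta(t) + x*DiracDelta(t - 2) + 5*x
# comes back as the pair (5*x, {DiracDelta(t): 3, DiracDelta(t - 2): x}),
# i.e. the non-delta remainder plus a coefficient map, which is exactly how
# _laplace_transform below unpacks it.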
@_noconds
def _laplace_transform(f, t, s_, simplify=True):
""" The backend function for Laplace transforms.
This backend assumes that the frontend has already split sums
    such that `f` is not an addition anymore.
"""
s = Dummy('s')
a = Wild('a', exclude=[t])
deltazero = []
deltanonzero = []
try:
integratable, deltadict = expand_dirac_delta(f)
except PolyNonlinearError:
raise IntegralTransformError(
'Laplace', f, 'could not expand DiracDelta expressions')
for dirac_func, dirac_coeff in deltadict.items():
p = dirac_func.match(DiracDelta(a*t))
if p:
deltazero.append(dirac_coeff.subs(t,0)/p[a])
else:
if dirac_func.args[0].subs(t,0).is_zero:
raise IntegralTransformError('Laplace', f,\
'not implemented yet.')
else:
deltanonzero.append(dirac_func*dirac_coeff)
F = Add(integrate(exp(-s*t) * Add(integratable, *deltanonzero),
(t, S.Zero, S.Infinity)),
Add(*deltazero))
if not F.has(Integral):
return _simplify(F.subs(s, s_), simplify), S.NegativeInfinity, S.true
if not F.is_Piecewise:
raise IntegralTransformError(
'Laplace', f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(
'Laplace', f, 'integral in unexpected form')
def process_conds(conds):
""" Turn ``conds`` into a strip and auxiliary conditions. """
from sympy.solvers.inequalities import _solve_inequality
a = S.NegativeInfinity
aux = S.true
conds = conjuncts(to_cnf(conds))
p, q, w1, w2, w3, w4, w5 = symbols(
'p q w1 w2 w3 w4 w5', cls=Wild, exclude=[s])
patterns = (
p*Abs(arg((s + w3)*q)) < w2,
p*Abs(arg((s + w3)*q)) <= w2,
Abs(periodic_argument((s + w3)**p*q, w1)) < w2,
Abs(periodic_argument((s + w3)**p*q, w1)) <= w2,
Abs(periodic_argument((polar_lift(s + w3))**p*q, w1)) < w2,
Abs(periodic_argument((polar_lift(s + w3))**p*q, w1)) <= w2)
for c in conds:
a_ = S.Infinity
aux_ = []
for d in disjuncts(c):
if d.is_Relational and s in d.rhs.free_symbols:
d = d.reversed
if d.is_Relational and isinstance(d, (Ge, Gt)):
d = d.reversedsign
for pat in patterns:
m = d.match(pat)
if m:
break
if m:
if m[q].is_positive and m[w2]/m[p] == pi/2:
d = -re(s + m[w3]) < 0
m = d.match(p - cos(w1*Abs(arg(s*w5))*w2)*Abs(s**w3)**w4 < 0)
if not m:
m = d.match(
cos(p - Abs(periodic_argument(s**w1*w5, q))*w2)*Abs(s**w3)**w4 < 0)
if not m:
m = d.match(
p - cos(Abs(periodic_argument(polar_lift(s)**w1*w5, q))*w2
)*Abs(s**w3)**w4 < 0)
if m and all(m[wild].is_positive for wild in [w1, w2, w3, w4, w5]):
d = re(s) > m[p]
d_ = d.replace(
re, lambda x: x.expand().as_real_imag()[0]).subs(re(s), t)
if not d.is_Relational or \
d.rel_op in ('==', '!=') \
or d_.has(s) or not d_.has(t):
aux_ += [d]
continue
soln = _solve_inequality(d_, t)
if not soln.is_Relational or \
soln.rel_op in ('==', '!='):
aux_ += [d]
continue
if soln.lts == t:
raise IntegralTransformError('Laplace', f,
'convergence not in half-plane?')
else:
a_ = Min(soln.lts, a_)
if a_ is not S.Infinity:
a = Max(a_, a)
else:
aux = And(aux, Or(*aux_))
return a, aux.canonical if aux.is_Relational else aux
conds = [process_conds(c) for c in disjuncts(cond)]
conds2 = [x for x in conds if x[1] != False and x[0] is not S.NegativeInfinity]
if not conds2:
conds2 = [x for x in conds if x[1] != False]
conds = list(ordered(conds2))
def cnt(expr):
if expr in (True, False):
return 0
return expr.count_ops()
conds.sort(key=lambda x: (-x[0], cnt(x[1])))
if not conds:
raise IntegralTransformError('Laplace', f, 'no convergence found')
a, aux = conds[0] # XXX is [0] always the right one?
def sbs(expr):
return expr.subs(s, s_)
if simplify:
F = _simplifyconds(F, s, a)
aux = _simplifyconds(aux, s, a)
return _simplify(F.subs(s, s_), simplify), sbs(a), _canonical(sbs(aux))
def _laplace_deep_collect(f, t):
"""
    This is an internal helper function that traverses the expression
tree of `f(t)` and collects arguments. The purpose of it is that
anything like `f(w*t-1*t-c)` will be written as `f((w-1)*t-c)` such that
it can match `f(a*t+b)`.
"""
func = f.func
args = list(f.args)
if len(f.args) == 0:
return f
else:
for k in range(len(args)):
args[k] = _laplace_deep_collect(args[k], t)
if func.is_Add:
return func(*args).collect(t)
else:
return func(*args)
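# --- Hedged illustration (not part of the original module) ------------------
# Exercises the collection behaviour described in the docstring above on a
# throwaway undefined function; the symbols are local to this sketch.
def _example_laplace_deep_collect():
    from sympy import Function, symbols
    t_, w_, c_ = symbols('t w c')
    f_ = Function('f')
    # f(w*t - t - c) is returned as f((w - 1)*t - c)
    return _laplace_deep_collect(f_(w_*t_ - t_ - c_), t_)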
def _laplace_build_rules(t, s):
"""
This is an internal helper function that returns the table of Laplace
    transform rules in terms of the time variable `t` and the frequency
variable `s`. It is used by `_laplace_apply_rules`.
"""
a = Wild('a', exclude=[t])
b = Wild('b', exclude=[t])
n = Wild('n', exclude=[t])
tau = Wild('tau', exclude=[t])
omega = Wild('omega', exclude=[t])
dco = lambda f: _laplace_deep_collect(f,t)
laplace_transform_rules = [
# ( time domain,
# laplace domain,
# condition, convergence plane, preparation function )
#
# Catch constant (would otherwise be treated by 2.12)
(a, a/s, S.true, S.Zero, dco),
# DiracDelta rules
(DiracDelta(a*t-b),
exp(-s*b/a)/Abs(a),
Or(And(a>0, b>=0), And(a<0, b<=0)), S.Zero, dco),
(DiracDelta(a*t-b),
S(0),
Or(And(a<0, b>=0), And(a>0, b<=0)), S.Zero, dco),
# Rules from http://eqworld.ipmnet.ru/en/auxiliary/inttrans/
# 2.1
(1,
1/s,
S.true, S.Zero, dco),
# 2.2 expressed in terms of Heaviside
(Heaviside(a*t-b),
exp(-s*b/a)/s,
And(a>0, b>0), S.Zero, dco),
(Heaviside(a*t-b),
(1-exp(-s*b/a))/s,
And(a<0, b<0), S.Zero, dco),
(Heaviside(a*t-b),
1/s,
And(a>0, b<=0), S.Zero, dco),
(Heaviside(a*t-b),
0,
And(a<0, b>0), S.Zero, dco),
# 2.3
(t,
1/s**2,
S.true, S.Zero, dco),
# 2.4
(1/(a*t+b),
-exp(-b/a*s)*Ei(-b/a*s)/a,
a>0, S.Zero, dco),
# 2.5 and 2.6 are covered by 2.11
# 2.7
(1/sqrt(a*t+b),
sqrt(a*pi/s)*exp(b/a*s)*erfc(sqrt(b/a*s))/a,
a>0, S.Zero, dco),
# 2.8
(sqrt(t)/(t+b),
sqrt(pi/s)-pi*sqrt(b)*exp(b*s)*erfc(sqrt(b*s)),
S.true, S.Zero, dco),
# 2.9
((a*t+b)**(-S(3)/2),
2*b**(-S(1)/2)-2*(pi*s/a)**(S(1)/2)*exp(b/a*s)*erfc(sqrt(b/a*s))/a,
a>0, S.Zero, dco),
# 2.10
(t**(S(1)/2)*(t+a)**(-1),
(pi/s)**(S(1)/2)-pi*a**(S(1)/2)*exp(a*s)*erfc(sqrt(a*s)),
S.true, S.Zero, dco),
# 2.11
(1/(a*sqrt(t) + t**(3/2)),
pi*a**(S(1)/2)*exp(a*s)*erfc(sqrt(a*s)),
S.true, S.Zero, dco),
# 2.12
(t**n,
gamma(n+1)/s**(n+1),
n>-1, S.Zero, dco),
# 2.13
((a*t+b)**n,
lowergamma(n+1, b/a*s)*exp(-b/a*s)/s**(n+1)/a,
And(n>-1, a>0), S.Zero, dco),
# 2.14
(t**n/(t+a),
a**n*gamma(n+1)*lowergamma(-n,a*s),
n>-1, S.Zero, dco),
# 3.1
(exp(a*t-tau),
exp(-tau)/(s-a),
S.true, a, dco),
# 3.2
(t*exp(a*t-tau),
exp(-tau)/(s-a)**2,
S.true, a, dco),
# 3.3
(t**n*exp(a*t),
gamma(n+1)/(s-a)**(n+1),
n>-1, a, dco),
# 3.4 and 3.5 cannot be covered here because they are
# sums and only the individual sum terms will get here.
# 3.6
(exp(-a*t**2),
sqrt(pi/4/a)*exp(s**2/4/a)*erfc(s/sqrt(4*a)),
a>0, S.Zero, dco),
# 3.7
(t*exp(-a*t**2),
1/(2*a)-2/sqrt(pi)/(4*a)**(S(3)/2)*s*erfc(s/sqrt(4*a)),
S.true, S.Zero, dco),
# 3.8
(exp(-a/t),
2*sqrt(a/s)*besselk(1, 2*sqrt(a*s)),
a>=0, S.Zero, dco),
# 3.9
(sqrt(t)*exp(-a/t),
S(1)/2*sqrt(pi/s**3)*(1+2*sqrt(a*s))*exp(-2*sqrt(a*s)),
a>=0, S.Zero, dco),
# 3.10
(exp(-a/t)/sqrt(t),
sqrt(pi/s)*exp(-2*sqrt(a*s)),
a>=0, S.Zero, dco),
# 3.11
(exp(-a/t)/(t*sqrt(t)),
sqrt(pi/a)*exp(-2*sqrt(a*s)),
a>0, S.Zero, dco),
# 3.12
(t**n*exp(-a/t),
2*(a/s)**((n+1)/2)*besselk(n+1, 2*sqrt(a*s)),
a>0, S.Zero, dco),
# 3.13
(exp(-2*sqrt(a*t)),
s**(-1)-sqrt(pi*a)*s**(-S(3)/2)*exp(a/s)*erfc(sqrt(a/s)),
S.true, S.Zero, dco),
# 3.14
(exp(-2*sqrt(a*t))/sqrt(t),
(pi/s)**(S(1)/2)*exp(a/s)*erfc(sqrt(a/s)),
S.true, S.Zero, dco),
# 4.1
(sinh(a*t),
a/(s**2-a**2),
S.true, Abs(a), dco),
# 4.2
(sinh(a*t)**2,
2*a**2/(s**3-4*a**2*s**2),
S.true, Abs(2*a), dco),
# 4.3
(sinh(a*t)/t,
log((s+a)/(s-a))/2,
S.true, a, dco),
# 4.4
(t**n*sinh(a*t),
gamma(n+1)/2*((s-a)**(-n-1)-(s+a)**(-n-1)),
n>-2, Abs(a), dco),
# 4.5
(sinh(2*sqrt(a*t)),
sqrt(pi*a)/s/sqrt(s)*exp(a/s),
S.true, S.Zero, dco),
# 4.6
(sqrt(t)*sinh(2*sqrt(a*t)),
pi**(S(1)/2)*s**(-S(5)/2)*(s/2+a)*exp(a/s)*erf(sqrt(a/s))-a**(S(1)/2)*s**(-2),
S.true, S.Zero, dco),
# 4.7
(sinh(2*sqrt(a*t))/sqrt(t),
pi**(S(1)/2)*s**(-S(1)/2)*exp(a/s)*erf(sqrt(a/s)),
S.true, S.Zero, dco),
# 4.8
(sinh(sqrt(a*t))**2/sqrt(t),
pi**(S(1)/2)/2*s**(-S(1)/2)*(exp(a/s)-1),
S.true, S.Zero, dco),
# 4.9
(cosh(a*t),
s/(s**2-a**2),
S.true, Abs(a), dco),
# 4.10
(cosh(a*t)**2,
(s**2-2*a**2)/(s**3-4*a**2*s**2),
S.true, Abs(2*a), dco),
# 4.11
(t**n*cosh(a*t),
gamma(n+1)/2*((s-a)**(-n-1)+(s+a)**(-n-1)),
n>-1, Abs(a), dco),
# 4.12
(cosh(2*sqrt(a*t)),
1/s+sqrt(pi*a)/s/sqrt(s)*exp(a/s)*erf(sqrt(a/s)),
S.true, S.Zero, dco),
# 4.13
(sqrt(t)*cosh(2*sqrt(a*t)),
pi**(S(1)/2)*s**(-S(5)/2)*(s/2+a)*exp(a/s),
S.true, S.Zero, dco),
# 4.14
(cosh(2*sqrt(a*t))/sqrt(t),
pi**(S(1)/2)*s**(-S(1)/2)*exp(a/s),
S.true, S.Zero, dco),
# 4.15
(cosh(sqrt(a*t))**2/sqrt(t),
pi**(S(1)/2)/2*s**(-S(1)/2)*(exp(a/s)+1),
S.true, S.Zero, dco),
# 5.1
(log(a*t),
-log(s/a+S.EulerGamma)/s,
a>0, S.Zero, dco),
# 5.2
(log(1+a*t),
-exp(s/a)/s*Ei(-s/a),
S.true, S.Zero, dco),
# 5.3
(log(a*t+b),
(log(b)-exp(s/b/a)/s*a*Ei(-s/b))/s*a,
a>0, S.Zero, dco),
# 5.4 is covered by 5.7
# 5.5
(log(t)/sqrt(t),
-sqrt(pi/s)*(log(4*s)+S.EulerGamma),
S.true, S.Zero, dco),
# 5.6 is covered by 5.7
# 5.7
(t**n*log(t),
gamma(n+1)*s**(-n-1)*(digamma(n+1)-log(s)),
n>-1, S.Zero, dco),
# 5.8
(log(a*t)**2,
((log(s/a)+S.EulerGamma)**2+pi**2/6)/s,
a>0, S.Zero, dco),
# 5.9
(exp(-a*t)*log(t),
-(log(s+a)+S.EulerGamma)/(s+a),
S.true, -a, dco),
# 6.1
(sin(omega*t),
omega/(s**2+omega**2),
S.true, S.Zero, dco),
# 6.2
(Abs(sin(omega*t)),
omega/(s**2+omega**2)*coth(pi*s/2/omega),
omega>0, S.Zero, dco),
# 6.3 and 6.4 are covered by 1.8
# 6.5 is covered by 1.8 together with 2.5
# 6.6
(sin(omega*t)/t,
atan(omega/s),
S.true, S.Zero, dco),
# 6.7
(sin(omega*t)**2/t,
log(1+4*omega**2/s**2)/4,
S.true, S.Zero, dco),
# 6.8
(sin(omega*t)**2/t**2,
omega*atan(2*omega/s)-s*log(1+4*omega**2/s**2)/4,
S.true, S.Zero, dco),
# 6.9
(sin(2*sqrt(a*t)),
sqrt(pi*a)/s/sqrt(s)*exp(-a/s),
a>0, S.Zero, dco),
# 6.10
(sin(2*sqrt(a*t))/t,
pi*erf(sqrt(a/s)),
a>0, S.Zero, dco),
# 6.11
(cos(omega*t),
s/(s**2+omega**2),
S.true, S.Zero, dco),
# 6.12
(cos(omega*t)**2,
(s**2+2*omega**2)/(s**2+4*omega**2)/s,
S.true, S.Zero, dco),
# 6.13 is covered by 1.9 together with 2.5
# 6.14 and 6.15 cannot be done with this method, the respective sum
# parts do not converge. Solve elsewhere if really needed.
# 6.16
(sqrt(t)*cos(2*sqrt(a*t)),
sqrt(pi)/2*s**(-S(5)/2)*(s-2*a)*exp(-a/s),
a>0, S.Zero, dco),
# 6.17
(cos(2*sqrt(a*t))/sqrt(t),
sqrt(pi/s)*exp(-a/s),
a>0, S.Zero, dco),
# 6.18
(sin(a*t)*sin(b*t),
2*a*b*s/(s**2+(a+b)**2)/(s**2+(a-b)**2),
S.true, S.Zero, dco),
# 6.19
(cos(a*t)*sin(b*t),
b*(s**2-a**2+b**2)/(s**2+(a+b)**2)/(s**2+(a-b)**2),
S.true, S.Zero, dco),
# 6.20
(cos(a*t)*cos(b*t),
s*(s**2+a**2+b**2)/(s**2+(a+b)**2)/(s**2+(a-b)**2),
S.true, S.Zero, dco),
# 6.21
(exp(b*t)*sin(a*t),
a/((s-b)**2+a**2),
S.true, b, dco),
# 6.22
(exp(b*t)*cos(a*t),
(s-b)/((s-b)**2+a**2),
S.true, b, dco),
# 7.1
(erf(a*t),
exp(s**2/(2*a)**2)*erfc(s/(2*a))/s,
a>0, S.Zero, dco),
# 7.2
(erf(sqrt(a*t)),
sqrt(a)/sqrt(s+a)/s,
a>0, S.Zero, dco),
# 7.3
(exp(a*t)*erf(sqrt(a*t)),
sqrt(a)/sqrt(s)/(s-a),
a>0, a, dco),
# 7.4
(erf(sqrt(a/t)/2),
(1-exp(-sqrt(a*s)))/s,
a>0, S.Zero, dco),
# 7.5
(erfc(sqrt(a*t)),
(sqrt(s+a)-sqrt(a))/sqrt(s+a)/s,
a>0, S.Zero, dco),
# 7.6
(exp(a*t)*erfc(sqrt(a*t)),
1/(s+sqrt(a*s)),
a>0, S.Zero, dco),
# 7.7
(erfc(sqrt(a/t)/2),
exp(-sqrt(a*s))/s,
a>0, S.Zero, dco),
# 8.1, 8.2
(besselj(n, a*t),
a**n/(sqrt(s**2+a**2)*(s+sqrt(s**2+a**2))**n),
And(a>0, n>-1), S.Zero, dco),
# 8.3, 8.4
(t**b*besselj(n, a*t),
2**n/sqrt(pi)*gamma(n+S.Half)*a**n*(s**2+a**2)**(-n-S.Half),
And(And(a>0, n>-S.Half), Eq(b, n)), S.Zero, dco),
# 8.5
(t**b*besselj(n, a*t),
2**(n+1)/sqrt(pi)*gamma(n+S(3)/2)*a**n*s*(s**2+a**2)**(-n-S(3)/2),
And(And(a>0, n>-1), Eq(b, n+1)), S.Zero, dco),
# 8.6
(besselj(0, 2*sqrt(a*t)),
exp(-a/s)/s,
a>0, S.Zero, dco),
# 8.7, 8.8
(t**(b)*besselj(n, 2*sqrt(a*t)),
a**(n/2)*s**(-n-1)*exp(-a/s),
And(And(a>0, n>-1), Eq(b, n*S.Half)), S.Zero, dco),
# 8.9
(besselj(0, a*sqrt(t**2+b*t)),
exp(b*s-b*sqrt(s**2+a**2))/sqrt(s**2+a**2),
b>0, S.Zero, dco),
# 8.10, 8.11
(besseli(n, a*t),
a**n/(sqrt(s**2-a**2)*(s+sqrt(s**2-a**2))**n),
And(a>0, n>-1), Abs(a), dco),
# 8.12
(t**b*besseli(n, a*t),
2**n/sqrt(pi)*gamma(n+S.Half)*a**n*(s**2-a**2)**(-n-S.Half),
And(And(a>0, n>-S.Half), Eq(b, n)), Abs(a), dco),
# 8.13
(t**b*besseli(n, a*t),
2**(n+1)/sqrt(pi)*gamma(n+S(3)/2)*a**n*s*(s**2-a**2)**(-n-S(3)/2),
And(And(a>0, n>-1), Eq(b, n+1)), Abs(a), dco),
# 8.15, 8.16
(t**(b)*besseli(n, 2*sqrt(a*t)),
a**(n/2)*s**(-n-1)*exp(a/s),
And(And(a>0, n>-1), Eq(b, n*S.Half)), S.Zero, dco),
# 8.17
(bessely(0, a*t),
-2/pi*asinh(s/a)/sqrt(s**2+a**2),
a>0, S.Zero, dco),
# 8.18
(besselk(0, a*t),
(log(s+sqrt(s**2-a**2)))/(sqrt(s**2-a**2)),
a>0, Abs(a), dco)
]
return laplace_transform_rules
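# --- Hedged illustration (not part of the original module) ------------------
# A deliberately simplified manual scan of the table above, considering only
# rules whose condition is literally ``S.true`` so no reasoning about the
# wildcard parameters is needed. The real lookup, with full condition
# handling, lives in ``_laplace_apply_rules`` elsewhere in this module.
def _example_lookup_laplace_rule(f, t, s):
    for time_dom, laplace_dom, check, plane, prep in _laplace_build_rules(t, s):
        if check is not S.true:
            continue
        ma = prep(f).match(time_dom)
        if ma:
            return S(laplace_dom).xreplace(ma), plane
    return None
# e.g. with symbols t, s: _example_lookup_laplace_rule(sin(2*t), t, s) matches
# rule 6.1 and returns (2/(s**2 + 4), 0).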
def _laplace_cr(f, a, c, **hints):
"""
Internal helper function that will return `(f, a, c)` unless `**hints`
contains `noconds=True`, in which case it will only return `f`.
"""
conds = not hints.get('noconds', False)
if conds:
return f, a, c
else:
return f
def _laplace_rule_timescale(f, t, s, doit=True, **hints):
r"""
This internal helper function tries to apply the time-scaling rule of the
Laplace transform and returns `None` if it cannot do it.
    Time-scaling means the following: if $F(s)$ is the Laplace transform of
$f(t)$, then, for any $a>0$, the Laplace transform of $f(at)$ will be
$\frac1a F(\frac{s}{a})$. This scaling will also affect the transform's
convergence plane.
"""
_simplify = hints.pop('simplify', True)
b = Wild('b', exclude=[t])
g = WildFunction('g', nargs=1)
k, func = f.as_independent(t, as_Add=False)
ma1 = func.match(g)
if ma1:
arg = ma1[g].args[0].collect(t)
ma2 = arg.match(b*t)
if ma2 and ma2[b]>0:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
debug(' rule: amplitude and time scaling (1.1, 1.2)')
if ma2[b]==1:
if doit==True and not any(func.has(t) for func
in ma1[g].atoms(AppliedUndef)):
return k*_laplace_transform(ma1[g].func(t), t, s,
simplify=_simplify)
else:
return k*LaplaceTransform(ma1[g].func(t), t, s, **hints)
else:
L = _laplace_apply_rules(ma1[g].func(t), t, s/ma2[b],
doit=doit, **hints)
try:
r, p, c = L
return (k/ma2[b]*r, p, c)
except TypeError:
return k/ma2[b]*L
return None
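# Added worked example of the time-scaling rule above (illustration only):
# since L{cos(t)} = s/(s**2 + 1), scaling with a = 2 gives
#   L{cos(2*t)} = (1/2) * (s/2) / ((s/2)**2 + 1) = s/(s**2 + 4),
# which is what rule 6.11 in _laplace_build_rules states directly.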
def _laplace_rule_heaviside(f, t, s, doit=True, **hints):
"""
This internal helper function tries to transform a product containing the
`Heaviside` function and returns `None` if it cannot do it.
"""
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
b = Wild('b', exclude=[t])
y = Wild('y')
g = WildFunction('g', nargs=1)
k, func = f.as_independent(t, as_Add=False)
ma1 = func.match(Heaviside(y)*g)
if ma1:
ma2 = ma1[y].match(t-a)
ma3 = ma1[g].args[0].collect(t).match(t-b)
if ma2 and ma2[a]>0 and ma3 and ma2[a]==ma3[b]:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s, %s )'%(f, ma1, ma2, ma3))
debug(' rule: time shift (1.3)')
L = _laplace_apply_rules(ma1[g].func(t), t, s, doit=doit, **hints)
try:
r, p, c = L
return (k*exp(-ma2[a]*s)*r, p, c)
except TypeError:
return k*exp(-ma2[a]*s)*L
return None
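# --- Illustrative sketch (not part of the original module) ------------------
# Rule 1.3 (time shift): for a > 0,
#     L{Heaviside(t - a) * g(t - a)}(s) = exp(-a*s) * G(s).
# Hypothetical demo, for illustration only:
def _demo_laplace_time_shift():
    from sympy import symbols, sin, Heaviside, laplace_transform
    td, sd = symbols('t s', positive=True)
    # With g = sin and a = 1 the result should be exp(-s)/(s**2 + 1),
    # up to how SymPy chooses to arrange the expression.
    return laplace_transform(Heaviside(td - 1)*sin(td - 1), td, sd, noconds=True)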
def _laplace_rule_exp(f, t, s, doit=True, **hints):
"""
This internal helper function tries to transform a product containing the
`exp` function and returns `None` if it cannot do it.
"""
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
z = Wild('z')
k, func = f.as_independent(t, as_Add=False)
ma1 = func.match(exp(y)*z)
if ma1:
ma2 = ma1[y].collect(t).match(a*t)
if ma2:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
debug(' rule: multiply with exp (1.5)')
L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=doit, **hints)
try:
r, p, c = L
return (r, p+ma2[a], c)
except TypeError:
return L
return None
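# --- Illustrative sketch (not part of the original module) ------------------
# Rule 1.5 (frequency shift): L{exp(a*t) * g(t)}(s) = G(s - a), with the
# convergence half-plane shifted by a.  Hypothetical demo, for illustration only:
def _demo_laplace_freq_shift():
    from sympy import symbols, exp, laplace_transform
    td, sd = symbols('t s', positive=True)
    # L{t} = 1/s**2, so L{t*exp(-2*t)} should come out as 1/(s + 2)**2.
    return laplace_transform(td*exp(-2*td), td, sd, noconds=True)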
def _laplace_rule_trig(f, t, s, doit=True, **hints):
"""
This internal helper function tries to transform a product containing a
trigonometric function (`sin`, `cos`, `sinh`, `cosh`) and returns
`None` if it cannot do it.
"""
_simplify = hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
z = Wild('z')
k, func = f.as_independent(t, as_Add=False)
# All of the rules have a very similar form: trig(y)*z is matched, and then
# two copies of the Laplace transform of z are shifted in the s Domain
# and added with a weight; see rules 1.6 to 1.9 in
# http://eqworld.ipmnet.ru/en/auxiliary/inttrans/laplace1.pdf
# The parameters in the tuples are (fm, nu, s1, s2, sd):
# fm: Function to match
# nu: Number of the rule, for debug purposes
# s1: weight of the sum, 'I' for sin and '1' for all others
# s2: sign of the second copy of the Laplace transform of z
# sd: shift direction; shift along real or imaginary axis if `1` or `I`
trigrules = [(sinh(y), '1.6', 1, -1, 1), (cosh(y), '1.7', 1, 1, 1),
(sin(y), '1.8', -I, -1, I), (cos(y), '1.9', 1, 1, I)]
for trigrule in trigrules:
fm, nu, s1, s2, sd = trigrule
ma1 = func.match(fm*z)
if ma1:
ma2 = ma1[y].collect(t).match(a*t)
if ma2:
debug('_laplace_apply_rules match:')
debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
debug(' rule: multiply with %s (%s)'%(fm.func, nu))
L = _laplace_apply_rules(ma1[z], t, s, doit=doit, **hints)
try:
r, p, c = L
# The convergence plane changes only if the shift has been
# done along the real axis:
if sd==1:
cp_shift = Abs(ma2[a])
else:
cp_shift = 0
return ((s1*(r.subs(s, s-sd*ma2[a])+\
s2*r.subs(s, s+sd*ma2[a]))).simplify()/2,
p+cp_shift, c)
except TypeError:
if doit==True and _simplify==True:
return (s1*(L.subs(s, s-sd*ma2[a])+\
s2*L.subs(s, s+sd*ma2[a]))).simplify()/2
else:
return (s1*(L.subs(s, s-sd*ma2[a])+\
s2*L.subs(s, s+sd*ma2[a])))/2
return None
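# --- Illustrative sketch (not part of the original module) ------------------
# Worked instance of rules 1.6-1.9: with g(t) = 1 (so G(s) = 1/s), the cosine
# rule adds the two shifted copies with weight 1/2,
#     L{cos(b*t)} = (1/(s - I*b) + 1/(s + I*b)) / 2 = s/(s**2 + b**2).
# Hypothetical demo, for illustration only:
def _demo_laplace_trig_rule():
    from sympy import symbols, cos, laplace_transform
    td, sd = symbols('t s', positive=True)
    # For b = 2 the result should be s/(s**2 + 4).
    return laplace_transform(cos(2*td), td, sd, noconds=True)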
def _laplace_rule_diff(f, t, s, doit=True, **hints):
"""
This internal helper function tries to transform an expression containing
a derivative of an undefined function and returns `None` if it cannot
do it.
"""
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
n = Wild('n', exclude=[t])
g = WildFunction('g', nargs=1)
ma1 = f.match(a*Derivative(g, (t, n)))
if ma1 and ma1[g].args[0] == t and ma1[n].is_integer:
debug('_laplace_apply_rules match:')
debug(' f: %s'%(f,))
debug(' rule: time derivative (1.11, 1.12)')
d = []
for k in range(ma1[n]):
if k==0:
y = ma1[g].func(t).subs(t, 0)
else:
y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0)
d.append(s**(ma1[n]-k-1)*y)
r = s**ma1[n]*_laplace_apply_rules(ma1[g].func(t), t, s, doit=doit,
**hints)
return r - Add(*d)
return None
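# --- Illustrative sketch (not part of the original module) ------------------
# Rules 1.11/1.12 (time derivative): for an undefined f,
#     L{f''(t)}(s) = s**2*F(s) - s*f(0) - f'(0),
# which is exactly the s**n*F(s) minus the sum of s**(n-k-1)*f^(k)(0) terms
# assembled above.  Hypothetical demo, for illustration only:
def _demo_laplace_derivative_rule():
    from sympy import symbols, Function, Derivative, laplace_transform
    td, sd = symbols('t s', positive=True)
    f = Function('f')
    # The result should contain an unevaluated LaplaceTransform(f(t), t, s)
    # scaled by s**2, minus the initial-value terms.
    return laplace_transform(Derivative(f(td), td, 2), td, sd, noconds=True)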
def _laplace_apply_rules(f, t, s, doit=True, **hints):
"""
Helper function for the class LaplaceTransform.
This function does a Laplace transform based on rules and, after
applying the rules, hands the rest over to `_laplace_transform`, which
will attempt to integrate.
If it is called with `doit=False`, then it will instead return
`LaplaceTransform` objects.
"""
k, func = f.as_independent(t, as_Add=False)
simple_rules = _laplace_build_rules(t, s)
for t_dom, s_dom, check, plane, prep in simple_rules:
ma = prep(func).match(t_dom)
if ma:
debug('_laplace_apply_rules match:')
debug(' f: %s'%(func,))
debug(' rule: %s o---o %s'%(t_dom, s_dom))
try:
debug(' try %s'%(check,))
c = check.xreplace(ma)
debug(' check %s -> %s'%(check, c))
if c==True:
return _laplace_cr(k*s_dom.xreplace(ma),
plane.xreplace(ma), S.true, **hints)
except Exception:
debug('_laplace_apply_rules did not match.')
if f.has(DiracDelta):
return None
prog_rules = [_laplace_rule_timescale, _laplace_rule_heaviside,
_laplace_rule_exp, _laplace_rule_trig, _laplace_rule_diff]
for p_rule in prog_rules:
LT = p_rule(f, t, s, doit=doit, **hints)
if LT is not None:
return LT
return None
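# --- Illustrative sketch (not part of the original module) ------------------
# Through the public API, a product such as exp(-2*t)*sin(3*t) exercises this
# dispatcher: the exp rule shifts s, the table then matches sin(3*t), and the
# result should be 3/((s + 2)**2 + 9) up to simplification.
# Hypothetical demo, for illustration only:
def _demo_laplace_rule_dispatch():
    from sympy import symbols, exp, sin, laplace_transform
    td, sd = symbols('t s', positive=True)
    return laplace_transform(exp(-2*td)*sin(3*td), td, sd, noconds=True)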
class LaplaceTransform(IntegralTransform):
"""
Class representing unevaluated Laplace transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Laplace transforms, see the :func:`laplace_transform`
docstring.
"""
_name = 'Laplace'
def _compute_transform(self, f, t, s, **hints):
LT = _laplace_apply_rules(f, t, s, **hints)
if LT is None:
_simplify = hints.pop('simplify', True)
debug('_laplace_apply_rules could not match function %s'%(f,))
debug(' hints: %s'%(hints,))
return _laplace_transform(f, t, s, simplify=_simplify, **hints)
else:
return LT
def _as_integral(self, f, t, s):
return Integral(f*exp(-s*t), (t, S.Zero, S.Infinity))
def _collapse_extra(self, extra):
conds = []
planes = []
for plane, cond in extra:
conds.append(cond)
planes.append(plane)
cond = And(*conds)
plane = Max(*planes)
if cond == False:
raise IntegralTransformError(
'Laplace', None, 'No combined convergence.')
return plane, cond
def _try_directly(self, **hints):
fn = self.function
debug('----> _try_directly: %s'%(fn, ))
t_ = self.function_variable
s_ = self.transform_variable
LT = None
if not fn.is_Add:
fn = expand_mul(fn)
try:
LT = self._compute_transform(fn, t_, s_, **hints)
except IntegralTransformError:
LT = None
return fn, LT
def laplace_transform(f, t, s, legacy_matrix=True, **hints):
r"""
Compute the Laplace Transform `F(s)` of `f(t)`,
.. math :: F(s) = \int_{0^{-}}^\infty e^{-st} f(t) \mathrm{d}t.
Explanation
===========
For all sensible functions, this converges absolutely in a
half-plane
.. math :: a < \operatorname{Re}(s)
This function returns ``(F, a, cond)`` where ``F`` is the Laplace
transform of ``f``, `a` is the half-plane of convergence, and `cond` are
auxiliary convergence conditions.
The implementation is rule-based, and if you are interested in which
rules are applied, and whether integration is attempted, you can switch
debug information on by setting `sympy.SYMPY_DEBUG=True`.
The lower bound is `0-`, meaning that this bound should be approached
from the lower side. This is only necessary if distributions are involved.
At present, it is only done if `f(t)` contains ``DiracDelta``, in which
case the Laplace transform is computed implicitly as
.. math :: F(s) = \lim_{\tau\to 0^{-}} \int_{\tau}^\infty e^{-st} f(t) \mathrm{d}t
by applying rules.
If the integral cannot be fully computed in closed form, this function
returns an unevaluated :class:`LaplaceTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``).
.. deprecated:: 1.9
Legacy behavior for matrices where ``laplace_transform`` with
``noconds=False`` (the default) returns a Matrix whose elements are
tuples. The behavior of ``laplace_transform`` for matrices will change
in a future release of SymPy to return a tuple of the transformed
Matrix and the convergence conditions for the matrix as a whole. Use
``legacy_matrix=False`` to enable the new behavior.
Examples
========
>>> from sympy import DiracDelta, exp, laplace_transform
>>> from sympy.abc import t, s, a
>>> laplace_transform(t**4, t, s)
(24/s**5, 0, True)
>>> laplace_transform(t**a, t, s)
(gamma(a + 1)/(s*s**a), 0, re(a) > -1)
>>> laplace_transform(DiracDelta(t)-a*exp(-a*t),t,s)
(s/(a + s), Max(0, -a), True)
See Also
========
inverse_laplace_transform, mellin_transform, fourier_transform
hankel_transform, inverse_hankel_transform
"""
debug('\n***** laplace_transform(%s, %s, %s)'%(f, t, s))
if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'):
conds = not hints.get('noconds', False)
if conds and legacy_matrix:
SymPyDeprecationWarning(
feature="laplace_transform of a Matrix with noconds=False (default)",
useinstead="the option legacy_matrix=False to get the new behaviour",
issue=21504,
deprecated_since_version="1.9"
).warn()
return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints))
else:
elements_trans = [laplace_transform(fij, t, s, **hints) for fij in f]
if conds:
elements, avals, conditions = zip(*elements_trans)
f_laplace = type(f)(*f.shape, elements)
return f_laplace, Max(*avals), And(*conditions)
else:
return type(f)(*f.shape, elements_trans)
return LaplaceTransform(f, t, s).doit(**hints)
@_noconds_(True)
def _inverse_laplace_transform(F, s, t_, plane, simplify=True):
""" The backend function for inverse Laplace transforms. """
from sympy.integrals.meijerint import meijerint_inversion, _get_coeff_exp
# There are two strategies we can try:
# 1) Use inverse mellin transforms - related by a simple change of variables.
# 2) Use the inversion integral.
t = Dummy('t', real=True)
def pw_simp(*args):
""" Simplify a piecewise expression from hyperexpand. """
# XXX we break modularity here!
if len(args) != 3:
return Piecewise(*args)
arg = args[2].args[0].argument
coeff, exponent = _get_coeff_exp(arg, t)
e1 = args[0].args[0]
e2 = args[1].args[0]
return Heaviside(1/Abs(coeff) - t**exponent)*e1 \
+ Heaviside(t**exponent - 1/Abs(coeff))*e2
if F.is_rational_function(s):
F = F.apart(s)
if F.is_Add:
f = Add(*[_inverse_laplace_transform(X, s, t, plane, simplify)\
for X in F.args])
return _simplify(f.subs(t, t_), simplify), True
try:
f, cond = inverse_mellin_transform(F, s, exp(-t), (None, S.Infinity),
needeval=True, noconds=False)
except IntegralTransformError:
f = None
if f is None:
f = meijerint_inversion(F, s, t)
if f is None:
raise IntegralTransformError('Inverse Laplace', f, '')
if f.is_Piecewise:
f, cond = f.args[0]
if f.has(Integral):
raise IntegralTransformError('Inverse Laplace', f,
'inversion integral of unrecognised form.')
else:
cond = S.true
f = f.replace(Piecewise, pw_simp)
if f.is_Piecewise:
# many of the functions called below can't work with piecewise
# (b/c it has a bool in args)
return f.subs(t, t_), cond
u = Dummy('u')
def simp_heaviside(arg, H0=S.Half):
a = arg.subs(exp(-t), u)
if a.has(t):
return Heaviside(arg, H0)
from sympy.solvers.inequalities import _solve_inequality
rel = _solve_inequality(a > 0, u)
if rel.lts == u:
k = log(rel.gts)
return Heaviside(t + k, H0)
else:
k = log(rel.lts)
return Heaviside(-(t + k), H0)
f = f.replace(Heaviside, simp_heaviside)
def simp_exp(arg):
return expand_complex(exp(arg))
f = f.replace(exp, simp_exp)
# TODO it would be nice to fix cosh and sinh ... simplify messes these
# exponentials up
return _simplify(f.subs(t, t_), simplify), cond
class InverseLaplaceTransform(IntegralTransform):
"""
Class representing unevaluated inverse Laplace transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Laplace transforms, see the
:func:`inverse_laplace_transform` docstring.
"""
_name = 'Inverse Laplace'
_none_sentinel = Dummy('None')
_c = Dummy('c')
def __new__(cls, F, s, x, plane, **opts):
if plane is None:
plane = InverseLaplaceTransform._none_sentinel
return IntegralTransform.__new__(cls, F, s, x, plane, **opts)
@property
def fundamental_plane(self):
plane = self.args[3]
if plane is InverseLaplaceTransform._none_sentinel:
plane = None
return plane
def _compute_transform(self, F, s, t, **hints):
return _inverse_laplace_transform(F, s, t, self.fundamental_plane, **hints)
def _as_integral(self, F, s, t):
c = self.__class__._c
return Integral(exp(s*t)*F, (s, c - S.ImaginaryUnit*S.Infinity,
c + S.ImaginaryUnit*S.Infinity))/(2*S.Pi*S.ImaginaryUnit)
def inverse_laplace_transform(F, s, t, plane=None, **hints):
r"""
Compute the inverse Laplace transform of `F(s)`, defined as
.. math :: f(t) = \frac{1}{2\pi i} \int_{c-i\infty}^{c+i\infty} e^{st} F(s) \mathrm{d}s,
for `c` so large that `F(s)` has no singularities in the
half-plane `\operatorname{Re}(s) > c-\epsilon`.
Explanation
===========
The plane can be specified by
argument ``plane``, but will be inferred if passed as None.
Under certain regularity conditions, this recovers `f(t)` from its
Laplace Transform `F(s)`, for non-negative `t`, and vice
versa.
If the integral cannot be computed in closed form, this function returns
an unevaluated :class:`InverseLaplaceTransform` object.
Note that this function will always assume `t` to be real,
regardless of the SymPy assumption on `t`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Examples
========
>>> from sympy import inverse_laplace_transform, exp, Symbol
>>> from sympy.abc import s, t
>>> a = Symbol('a', positive=True)
>>> inverse_laplace_transform(exp(-a*s)/s, s, t)
Heaviside(-a + t)
See Also
========
laplace_transform, _fast_inverse_laplace
hankel_transform, inverse_hankel_transform
"""
if isinstance(F, MatrixBase) and hasattr(F, 'applyfunc'):
return F.applyfunc(lambda Fij: inverse_laplace_transform(Fij, s, t, plane, **hints))
return InverseLaplaceTransform(F, s, t, plane).doit(**hints)
def _fast_inverse_laplace(e, s, t):
"""Fast inverse Laplace transform of rational function including RootSum"""
a, b, n = symbols('a, b, n', cls=Wild, exclude=[s])
def _ilt(e):
if not e.has(s):
return e
elif e.is_Add:
return _ilt_add(e)
elif e.is_Mul:
return _ilt_mul(e)
elif e.is_Pow:
return _ilt_pow(e)
elif isinstance(e, RootSum):
return _ilt_rootsum(e)
else:
raise NotImplementedError
def _ilt_add(e):
return e.func(*map(_ilt, e.args))
def _ilt_mul(e):
coeff, expr = e.as_independent(s)
if expr.is_Mul:
raise NotImplementedError
return coeff * _ilt(expr)
def _ilt_pow(e):
match = e.match((a*s + b)**n)
if match is not None:
nm, am, bm = match[n], match[a], match[b]
if nm.is_Integer and nm < 0:
return t**(-nm-1)*exp(-(bm/am)*t)/(am**-nm*gamma(-nm))
if nm == 1:
return exp(-(bm/am)*t) / am
raise NotImplementedError
def _ilt_rootsum(e):
expr = e.fun.expr
[variable] = e.fun.variables
return RootSum(e.poly, Lambda(variable, together(_ilt(expr))))
return _ilt(e)
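# --- Illustrative sketch (not part of the original module) ------------------
# _fast_inverse_laplace handles rational functions term by term through the
# (a*s + b)**n pattern above.  Hypothetical demo, for illustration only:
def _demo_fast_inverse_laplace():
    from sympy import symbols
    sd, td = symbols('s t')
    # 1/(s + 3)  ->  exp(-3*t)   (n = -1 branch)
    # 2/s        ->  2           (n = -1 branch with b = 0, coefficient kept)
    # 1/s**2     ->  t           (n = -2 branch)
    expr = 1/(sd + 3) + 2/sd + 1/sd**2
    return _fast_inverse_laplace(expr, sd, td)  # expected: exp(-3*t) + t + 2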
##########################################################################
# Fourier Transform
##########################################################################
@_noconds_(True)
def _fourier_transform(f, x, k, a, b, name, simplify=True):
r"""
Compute a general Fourier-type transform
.. math::
F(k) = a \int_{-\infty}^{\infty} e^{bixk} f(x)\, dx.
For suitable choice of *a* and *b*, this reduces to the standard Fourier
and inverse Fourier transforms.
"""
F = integrate(a*f*exp(b*S.ImaginaryUnit*x*k), (x, S.NegativeInfinity, S.Infinity))
if not F.has(Integral):
return _simplify(F, simplify), S.true
integral_f = integrate(f, (x, S.NegativeInfinity, S.Infinity))
if integral_f in (S.NegativeInfinity, S.Infinity, S.NaN) or integral_f.has(Integral):
raise IntegralTransformError(name, f, 'function not integrable on real axis')
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
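# --- Illustrative sketch (not part of the original module) ------------------
# Other conventions drop out of the same kernel by choosing a and b; e.g. the
# unitary, angular-frequency transform corresponds to a = 1/sqrt(2*pi), b = -1.
# Hypothetical wrapper, for illustration only:
def _demo_angular_fourier_transform(f, x, w):
    from sympy import sqrt, pi
    # For f = exp(-x**2) this should give exp(-w**2/4)/sqrt(2), since
    # Integral(exp(-x**2 - I*w*x), (x, -oo, oo)) = sqrt(pi)*exp(-w**2/4).
    return _fourier_transform(f, x, w, 1/sqrt(2*pi), -1, 'Fourier')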
class FourierTypeTransform(IntegralTransform):
""" Base class for Fourier transforms."""
def a(self):
raise NotImplementedError(
"Class %s must implement a(self) but does not" % self.__class__)
def b(self):
raise NotImplementedError(
"Class %s must implement b(self) but does not" % self.__class__)
def _compute_transform(self, f, x, k, **hints):
return _fourier_transform(f, x, k,
self.a(), self.b(),
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
a = self.a()
b = self.b()
return Integral(a*f*exp(b*S.ImaginaryUnit*x*k), (x, S.NegativeInfinity, S.Infinity))
class FourierTransform(FourierTypeTransform):
"""
Class representing unevaluated Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Fourier transforms, see the :func:`fourier_transform`
docstring.
"""
_name = 'Fourier'
def a(self):
return 1
def b(self):
return -2*S.Pi
def fourier_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency Fourier transform of ``f``, defined
as
.. math:: F(k) = \int_{-\infty}^\infty f(x) e^{-2\pi i x k} \mathrm{d} x.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`FourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import fourier_transform, exp
>>> from sympy.abc import x, k
>>> fourier_transform(exp(-x**2), x, k)
sqrt(pi)*exp(-pi**2*k**2)
>>> fourier_transform(exp(-x**2), x, k, noconds=False)
(sqrt(pi)*exp(-pi**2*k**2), True)
See Also
========
inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return FourierTransform(f, x, k).doit(**hints)
class InverseFourierTransform(FourierTypeTransform):
"""
Class representing unevaluated inverse Fourier transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Fourier transforms, see the
:func:`inverse_fourier_transform` docstring.
"""
_name = 'Inverse Fourier'
def a(self):
return 1
def b(self):
return 2*S.Pi
def inverse_fourier_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse Fourier transform of `F`,
defined as
.. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseFourierTransform` object.
For other Fourier transform conventions, see the function
:func:`sympy.integrals.transforms._fourier_transform`.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import inverse_fourier_transform, exp, sqrt, pi
>>> from sympy.abc import x, k
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x)
exp(-x**2)
>>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False)
(exp(-x**2), True)
See Also
========
fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseFourierTransform(F, k, x).doit(**hints)
##########################################################################
# Fourier Sine and Cosine Transform
##########################################################################
@_noconds_(True)
def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True):
"""
Compute a general sine or cosine-type transform
F(k) = a int_0^oo b*sin(x*k) f(x) dx.
F(k) = a int_0^oo b*cos(x*k) f(x) dx.
For suitable choice of a and b, this reduces to the standard sine/cosine
and inverse sine/cosine transforms.
"""
F = integrate(a*f*K(b*x*k), (x, S.Zero, S.Infinity))
if not F.has(Integral):
return _simplify(F, simplify), S.true
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
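# --- Illustrative sketch (not part of the original module) ------------------
# The unitary cosine transform corresponds to a = sqrt(2)/sqrt(pi), b = 1 and
# K = cos; e.g. for f = exp(-x),
#     Integral(exp(-x)*cos(k*x), (x, 0, oo)) = 1/(1 + k**2),
# so the transform should be sqrt(2)/(sqrt(pi)*(1 + k**2)).
# Hypothetical demo, for illustration only:
def _demo_cosine_kernel():
    from sympy import symbols, exp, sqrt, pi, cos
    xd, kd = symbols('x k', positive=True)
    return _sine_cosine_transform(exp(-xd), xd, kd,
                                  sqrt(2)/sqrt(pi), 1, cos, 'Cosine')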
class SineCosineTypeTransform(IntegralTransform):
"""
Base class for sine and cosine transforms.
Specify cls._kern.
"""
def a(self):
raise NotImplementedError(
"Class %s must implement a(self) but does not" % self.__class__)
def b(self):
raise NotImplementedError(
"Class %s must implement b(self) but does not" % self.__class__)
def _compute_transform(self, f, x, k, **hints):
return _sine_cosine_transform(f, x, k,
self.a(), self.b(),
self.__class__._kern,
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
a = self.a()
b = self.b()
K = self.__class__._kern
return Integral(a*f*K(b*x*k), (x, S.Zero, S.Infinity))
class SineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated sine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute sine transforms, see the :func:`sine_transform`
docstring.
"""
_name = 'Sine'
_kern = sin
def a(self):
return sqrt(2)/sqrt(pi)
def b(self):
return S.One
def sine_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency sine transform of `f`, defined
as
.. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \sin(2\pi x k) \mathrm{d} x.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`SineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import sine_transform, exp
>>> from sympy.abc import x, k, a
>>> sine_transform(x*exp(-a*x**2), x, k)
sqrt(2)*k*exp(-k**2/(4*a))/(4*a**(3/2))
>>> sine_transform(x**(-a), x, k)
2**(1/2 - a)*k**(a - 1)*gamma(1 - a/2)/gamma(a/2 + 1/2)
See Also
========
fourier_transform, inverse_fourier_transform
inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return SineTransform(f, x, k).doit(**hints)
class InverseSineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated inverse sine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse sine transforms, see the
:func:`inverse_sine_transform` docstring.
"""
_name = 'Inverse Sine'
_kern = sin
def a(self):
return sqrt(2)/sqrt(pi)
def b(self):
return S.One
def inverse_sine_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse sine transform of `F`,
defined as
.. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \sin(2\pi x k) \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseSineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import inverse_sine_transform, exp, sqrt, gamma
>>> from sympy.abc import x, k, a
>>> inverse_sine_transform(2**((1-2*a)/2)*k**(a - 1)*
... gamma(-a/2 + 1)/gamma((a+1)/2), k, x)
x**(-a)
>>> inverse_sine_transform(sqrt(2)*k*exp(-k**2/(4*a))/(4*sqrt(a)**3), k, x)
x*exp(-a*x**2)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseSineTransform(F, k, x).doit(**hints)
class CosineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated cosine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute cosine transforms, see the :func:`cosine_transform`
docstring.
"""
_name = 'Cosine'
_kern = cos
def a(self):
return sqrt(2)/sqrt(pi)
def b(self):
return S.One
def cosine_transform(f, x, k, **hints):
r"""
Compute the unitary, ordinary-frequency cosine transform of `f`, defined
as
.. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \cos(2\pi x k) \mathrm{d} x.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`CosineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import cosine_transform, exp, sqrt, cos
>>> from sympy.abc import x, k, a
>>> cosine_transform(exp(-a*x), x, k)
sqrt(2)*a/(sqrt(pi)*(a**2 + k**2))
>>> cosine_transform(exp(-a*sqrt(x))*cos(a*sqrt(x)), x, k)
a*exp(-a**2/(2*k))/(2*k**(3/2))
See Also
========
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform
inverse_cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return CosineTransform(f, x, k).doit(**hints)
class InverseCosineTransform(SineCosineTypeTransform):
"""
Class representing unevaluated inverse cosine transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse cosine transforms, see the
:func:`inverse_cosine_transform` docstring.
"""
_name = 'Inverse Cosine'
_kern = cos
def a(self):
return sqrt(2)/sqrt(pi)
def b(self):
return S.One
def inverse_cosine_transform(F, k, x, **hints):
r"""
Compute the unitary, ordinary-frequency inverse cosine transform of `F`,
defined as
.. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \cos(2\pi x k) \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseCosineTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import inverse_cosine_transform, sqrt, pi
>>> from sympy.abc import x, k, a
>>> inverse_cosine_transform(sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)), k, x)
exp(-a*x)
>>> inverse_cosine_transform(1/sqrt(k), k, x)
1/sqrt(x)
See Also
========
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform
cosine_transform
hankel_transform, inverse_hankel_transform
mellin_transform, laplace_transform
"""
return InverseCosineTransform(F, k, x).doit(**hints)
##########################################################################
# Hankel Transform
##########################################################################
@_noconds_(True)
def _hankel_transform(f, r, k, nu, name, simplify=True):
r"""
Compute a general Hankel transform
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
"""
F = integrate(f*besselj(nu, k*r)*r, (r, S.Zero, S.Infinity))
if not F.has(Integral):
return _simplify(F, simplify), S.true
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
class HankelTypeTransform(IntegralTransform):
"""
Base class for Hankel transforms.
"""
def doit(self, **hints):
return self._compute_transform(self.function,
self.function_variable,
self.transform_variable,
self.args[3],
**hints)
def _compute_transform(self, f, r, k, nu, **hints):
return _hankel_transform(f, r, k, nu, self._name, **hints)
def _as_integral(self, f, r, k, nu):
return Integral(f*besselj(nu, k*r)*r, (r, S.Zero, S.Infinity))
@property
def as_integral(self):
return self._as_integral(self.function,
self.function_variable,
self.transform_variable,
self.args[3])
class HankelTransform(HankelTypeTransform):
"""
Class representing unevaluated Hankel transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute Hankel transforms, see the :func:`hankel_transform`
docstring.
"""
_name = 'Hankel'
def hankel_transform(f, r, k, nu, **hints):
r"""
Compute the Hankel transform of `f`, defined as
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`HankelTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import hankel_transform, inverse_hankel_transform
>>> from sympy import exp
>>> from sympy.abc import r, k, m, nu, a
>>> ht = hankel_transform(1/r**m, r, k, nu)
>>> ht
2*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/(2**m*gamma(m/2 + nu/2))
>>> inverse_hankel_transform(ht, k, r, nu)
r**(-m)
>>> ht = hankel_transform(exp(-a*r), r, k, 0)
>>> ht
a/(k**3*(a**2/k**2 + 1)**(3/2))
>>> inverse_hankel_transform(ht, k, r, 0)
exp(-a*r)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
inverse_hankel_transform
mellin_transform, laplace_transform
"""
return HankelTransform(f, r, k, nu).doit(**hints)
class InverseHankelTransform(HankelTypeTransform):
"""
Class representing unevaluated inverse Hankel transforms.
For usage of this class, see the :class:`IntegralTransform` docstring.
For how to compute inverse Hankel transforms, see the
:func:`inverse_hankel_transform` docstring.
"""
_name = 'Inverse Hankel'
def inverse_hankel_transform(F, k, r, nu, **hints):
r"""
Compute the inverse Hankel transform of `F` defined as
.. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k.
Explanation
===========
If the transform cannot be computed in closed form, this
function returns an unevaluated :class:`InverseHankelTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`.
Note that for this transform, by default ``noconds=True``.
Examples
========
>>> from sympy import hankel_transform, inverse_hankel_transform
>>> from sympy import exp
>>> from sympy.abc import r, k, m, nu, a
>>> ht = hankel_transform(1/r**m, r, k, nu)
>>> ht
2*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/(2**m*gamma(m/2 + nu/2))
>>> inverse_hankel_transform(ht, k, r, nu)
r**(-m)
>>> ht = hankel_transform(exp(-a*r), r, k, 0)
>>> ht
a/(k**3*(a**2/k**2 + 1)**(3/2))
>>> inverse_hankel_transform(ht, k, r, 0)
exp(-a*r)
See Also
========
fourier_transform, inverse_fourier_transform
sine_transform, inverse_sine_transform
cosine_transform, inverse_cosine_transform
hankel_transform
mellin_transform, laplace_transform
"""
return InverseHankelTransform(F, k, r, nu).doit(**hints)
| 32.714901
| 98
| 0.554258
|
ac0708b7c5841d23b40a3b627c05048599668bfe
| 5,890
|
py
|
Python
|
ROSSi_workspace/rossi_plugin/src/rossi_plugin/Ros2UI/UI/Editors/LaunchFileEditor/GraphEntities/RosExecutableGraphEntity.py
|
isse-augsburg/ROSSi
|
66a23b6c133069325096d6e199e53d293e42d61b
|
[
"MIT"
] | null | null | null |
ROSSi_workspace/rossi_plugin/src/rossi_plugin/Ros2UI/UI/Editors/LaunchFileEditor/GraphEntities/RosExecutableGraphEntity.py
|
isse-augsburg/ROSSi
|
66a23b6c133069325096d6e199e53d293e42d61b
|
[
"MIT"
] | null | null | null |
ROSSi_workspace/rossi_plugin/src/rossi_plugin/Ros2UI/UI/Editors/LaunchFileEditor/GraphEntities/RosExecutableGraphEntity.py
|
isse-augsburg/ROSSi
|
66a23b6c133069325096d6e199e53d293e42d61b
|
[
"MIT"
] | null | null | null |
from typing import Dict, List
from PyQt5 import QtCore
from PyQt5.QtCore import QRectF
from PyQt5.QtWidgets import QGraphicsItem
from .ParameterGraphEntity import ParameterGraphEntity
from .....utils import fullname
from .....Representations.Ros2Representations.RosPackage import RosExecutable, RosPackage
from ....UIElements.DialogWindow import OptionParameter
from ....BaseGraphEntities.AbstractGraphEntity import DataObject
from ....BaseGraphEntities.AbstractGraphPortEntity import Connect
from ....BaseGraphEntities.StandartGraphEntity import StandartGraphEntity
class RosExecutableGraphEntity(StandartGraphEntity):
display_name: str
namespace: str
parameters: List[ParameterGraphEntity]
param_height: float = 15
original_height: float = 40
def __init__(self, exe: RosExecutable, x: float, y: float, width: float = 222, height: float = 40, dragable: bool = False, display_name: str = "", parent: QGraphicsItem = None, id: int = -1, show_parameters=True):
super().__init__(parent, id, x, y, width, self.original_height)
self.exe = exe
self.dragable = dragable
self.display_name = exe.displayName if display_name == "" else display_name  # compare strings with '==', not 'is'
#NodeInfo(self.exe.package.name, self.exe.executableName).get_topics(self.call)
self.exportable = True
self.namespace = None
self.parameters = []
if show_parameters:
self.add_new_parameter()
def add_new_parameter(self) -> ParameterGraphEntity:
param = ParameterGraphEntity(0, self.height, self.width, self.param_height, self)
param.disabled = True
param.port.add_connect_observer(self.on_port_connect_event)
self.parameters.append(param)
self.height += self.param_height
return param
def on_port_connect_event(self, connected: Connect):
if connected is None:
pass
else:
for param in self.parameters:
print(param.port.connected_to)
if param.disabled and param.port.connected_to is None and param.name is None:
return
self.add_new_parameter()
def call(self, info: str):
print(info)
def paint(self, painter, option, widget):
super(RosExecutableGraphEntity, self).paint(painter, option, widget)
painter.drawText(QRectF(0, 0, self.width, self.original_height/2), QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter, self.display_name)
painter.drawText(QRectF(0, self.original_height/2, self.width, self.original_height/2), QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter, self.exe.package.name)
def getData(self):
key = fullname(self)
values = {
"executable": self.exe,
"displayname": self.display_name
}
self.data = DataObject(key, values)
return self.data
@staticmethod
def getObjectFromData(data: DataObject) -> 'StandartGraphEntity':
item = RosExecutableGraphEntity(data.values["executable"], 0, 0, data.values["width"],
data.values["height"], dragable=False)
return item
def __namespacehelper(self) -> str:
if self.namespace is not None and self.namespace != "":
return "namespace='" + self.namespace + "',"
return ""
def toCode(self, intendLevel: int = 0):
intend = "\t" * intendLevel
imports = [
"from launch_ros.actions import Node"
]
ret = [
intend + "Node(",
intend + "\t" + "package='"+self.exe.package.name+"',",
intend + "\t" + self.__namespacehelper(),
intend + "\tnode_executable='" + self.exe.executableName + "',",
intend + "\tnode_name='" + self.display_name + "',",
intend + "\toutput='screen',",
intend + "\tparameters=[{\n" + self._paramsToCode(intendLevel+2),
intend + "\t}],",
intend + "),"
]
return ret, imports
def _toDict(self) -> Dict:
ret = {
"name": self.display_name,
"executable": self.exe.executableName,
"packageName": self.exe.package.name,
"parameters": self._paramsToDict()
}
return ret
def _paramsToCode(self, intendLevel) -> str:
ret = []
for param in self.parameters:
ret.append(param.toCode(intendLevel))
retu = ""
for s in ret:
retu += s
return retu
def _paramsToDict(self):
ret = []
for param in self.parameters:
if param.disabled is False:
ret.append(param._toDict())
return ret
def setDisplayName(self, name: str):
self.display_name = name
def setNameSpace(self, namespace):
self.namespace = namespace
def getProperties(self):
ret = [OptionParameter("display name", self.display_name, self.setDisplayName)]
for param in self.parameters:
ret.append(OptionParameter("parameter name", param.name, param.setName))
return ret
def onRemoveEvent(self):
pass
@staticmethod
def fromJson(json: Dict) -> 'StandartGraphEntity':
exe = RosExecutableGraphEntity(
RosExecutable(json["executable"], json["executable"], RosPackage(json["packageName"])),
json["x"],
json["y"],
json["width"],
json["height"],
id=json["id"],
display_name=json["name"],
show_parameters=False
)
for item in json["parameters"]:
param = exe.add_new_parameter()
param.name = item["name"]
param.disabled = item["disabled"]
param.port.has_to_connect_to_id = item["port"]["connected_to"]
exe.add_new_parameter()
return exe
| 35.914634
| 217
| 0.617148
|
178c3c18b230a90a86eac6b5500d72c36612e9c4
| 1,120
|
py
|
Python
|
ipython-extension/setup.py
|
kernelpanek/jupyterlab-autoplot
|
023b0b6a1ebc1857b4dab95c04286d45ec70fc42
|
[
"BSD-3-Clause"
] | 48
|
2021-01-27T14:40:00.000Z
|
2022-03-31T10:15:35.000Z
|
ipython-extension/setup.py
|
kernelpanek/jupyterlab-autoplot
|
023b0b6a1ebc1857b4dab95c04286d45ec70fc42
|
[
"BSD-3-Clause"
] | 1
|
2021-03-11T06:31:35.000Z
|
2021-07-29T18:47:29.000Z
|
ipython-extension/setup.py
|
kernelpanek/jupyterlab-autoplot
|
023b0b6a1ebc1857b4dab95c04286d45ec70fc42
|
[
"BSD-3-Clause"
] | 5
|
2021-04-22T17:44:12.000Z
|
2022-02-09T22:47:16.000Z
|
from setuptools import setup, find_packages
def get_long_description():
with open("README.md", "r", encoding="utf-8") as f:
desc = f.read()
return desc
setup(
name="jupyterlab-autoplot",
version="0.2.0",
author="Man Alpha Technology",
author_email="ManAlphaTech@man.com",
license="BSD 3-Clause",
description="The IPython component for the Autoplot JupyterLab extension.",
long_description=get_long_description(),
url="https://github.com/man-group/jupyterlab-autoplot",
keywords=["jupyter", "jupyterlab", "matplotlib", "mpld3", "time series"],
packages=find_packages(include=["autoplot", "autoplot.*"], exclude=["tests", "tests.*"]),
include_package_data=True,
install_requires=["ipywidgets", "ipython", "numpy", "pandas", "matplotlib", "mpld3", "dtale>=1.16.0,<1.36"],
tests_require=["pytest", "pytest-cov", "mock"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Framework :: Jupyter",
],
python_requires=">=3.6",
)
| 35
| 112
| 0.648214
|
69923a44bb2fed5d1058feccdd07bc0b34d6a23e
| 1,756
|
py
|
Python
|
neptune/new/internal/backends/factory.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 254
|
2020-01-27T14:18:57.000Z
|
2022-03-31T21:40:33.000Z
|
neptune/new/internal/backends/factory.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 160
|
2020-02-05T11:00:22.000Z
|
2022-03-31T08:50:24.000Z
|
neptune/new/internal/backends/factory.py
|
Raalsky/neptune-client
|
24ac58581774e61056d49cd1a22727799c14ad54
|
[
"Apache-2.0"
] | 23
|
2020-02-07T09:19:50.000Z
|
2022-02-15T09:52:56.000Z
|
#
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Optional
from neptune.new.internal.credentials import Credentials
from neptune.new.types.mode import Mode
from .hosted_neptune_backend import HostedNeptuneBackend
from .neptune_backend import NeptuneBackend
from .neptune_backend_mock import NeptuneBackendMock
from .offline_neptune_backend import OfflineNeptuneBackend
def get_backend(
mode: Mode, api_token: Optional[str] = None, proxies: Optional[dict] = None
) -> NeptuneBackend:
if mode == Mode.ASYNC:
return HostedNeptuneBackend(
credentials=Credentials.from_token(api_token=api_token), proxies=proxies
)
elif mode == Mode.SYNC:
return HostedNeptuneBackend(
credentials=Credentials.from_token(api_token=api_token), proxies=proxies
)
elif mode == Mode.DEBUG:
return NeptuneBackendMock()
elif mode == Mode.OFFLINE:
return OfflineNeptuneBackend()
elif mode == Mode.READ_ONLY:
return HostedNeptuneBackend(
credentials=Credentials.from_token(api_token=api_token), proxies=proxies
)
else:
raise ValueError(f"mode should be one of {[m for m in Mode]}")
| 36.583333
| 84
| 0.732346
|
f0915f440b653f9ce3fd0a7026433327d779dd1c
| 1,588
|
py
|
Python
|
dtdecoderutils.py
|
dyeo/dt.py
|
6ca1913e30c35e1661b2959849e55376fec34ed0
|
[
"MIT"
] | null | null | null |
dtdecoderutils.py
|
dyeo/dt.py
|
6ca1913e30c35e1661b2959849e55376fec34ed0
|
[
"MIT"
] | null | null | null |
dtdecoderutils.py
|
dyeo/dt.py
|
6ca1913e30c35e1661b2959849e55376fec34ed0
|
[
"MIT"
] | null | null | null |
import re
# The error raised when the dt file is invalid.
class DTDecodeError(Exception): pass
# Type aliases
_BOOL = 0
_BYTE = 1
_INT = 2
_SHORT = 3
_LONG = 4
_FLOAT = 5
_DOUBLE = 6
_CHAR = 7
_STRING = 8
# Tokenizer regular expression
_rx_tok = r";.*$|\"(?:[^\"\\]|\\.)*\"|\'\\?.\'|[\[\]{}:;]|[^\s\[\]{}:;]+"
# Key pattern
_rx_key = re.compile(r"^(?!true|false)(?:[_a-zA-Z][_a-zA-Z0-9]*)$")
# Value patterns
_rx_val = {
_BOOL: re.compile(r"^(true|false)$"),
_BYTE: re.compile(r"^0[xX]([0-9a-fA-F])+$"),
_INT: re.compile(r"^([0-9]+)$"),
_SHORT: re.compile(r"^([0-9]+)[sS]$"),
_LONG: re.compile(r"^([0-9]+)[lL]$"),
_FLOAT: re.compile(r"^([0-9]*\.[0-9]*)[fF]$"),
_DOUBLE: re.compile(r"^([0-9]*\.[0-9]*)$"),
_CHAR: re.compile(r"^'(\\?.)'$"),
_STRING: re.compile(r"^\"((?:[^\"\\]|\\.)*)\"$"),
}
# Returns a python-acceptable primitive from a datatag token
def _get_value(token):
for k,v in _rx_val.items():
match = re.match(v, token)
if match:
val = match.group(1)
if not val:
raise DTDecodeError(f"Unkown value type for value {state.tokens[state.iter]}")
if k == _BOOL:
val = val == "true"
elif k in {_BYTE,_INT,_SHORT,_LONG}:
val = int(val, 16 if k == _BYTE else 10)
elif k in {_FLOAT, _DOUBLE}:
val = float(val)
else:
val = str(val.encode("utf-8").decode("unicode_escape"))
return val
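# Illustrative sketch (not part of the original module): how the patterns above
# map datatag literals onto Python values.  Hypothetical helper, illustration only:
def _example_get_value():
    assert _get_value("true") is True      # _BOOL
    assert _get_value("0x1F") == 31        # _BYTE, parsed base 16
    assert _get_value("42s") == 42         # _SHORT
    assert _get_value("3.5f") == 3.5       # _FLOAT
    assert _get_value('"hi"') == "hi"      # _STRING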
| 30.538462
| 94
| 0.487406
|
7080c96ddc9a5b6ffb5deb5fe086803a88f886e6
| 4,419
|
py
|
Python
|
data/mri_data.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 18
|
2019-10-21T23:54:28.000Z
|
2021-12-23T08:16:04.000Z
|
data/mri_data.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 1
|
2020-07-11T08:05:33.000Z
|
2020-07-11T08:05:33.000Z
|
data/mri_data.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 5
|
2019-11-23T14:11:54.000Z
|
2022-02-19T13:39:15.000Z
|
import pathlib
import random
from math import ceil
import h5py
from torch.utils.data import Dataset
class SliceData(Dataset):
"""
A PyTorch Dataset that provides access to MR image slices.
"""
def __init__(self, root, transform, challenge, sample_rate=1, use_gt=True):
"""
Args:
root (Path): Path to the dataset.
transform (callable): A callable object that pre-processes the raw data into
appropriate form. The transform function should take 'kspace', 'target',
'attributes', 'filename', and 'slice_num' as inputs. 'target' may be null
for test data.
challenge (str): "singlecoil" or "multicoil" depending on which challenge to use.
sample_rate (float, optional): A float between 0 and 1. This controls what fraction
of the volumes should be loaded.
use_gt (bool): Whether to load the ground truth 320x320 fully-sampled reconstructions or not.
Very useful for reducing data I/O in k-space learning.
"""
if challenge not in ('singlecoil', 'multicoil'):
raise ValueError('challenge should be either "singlecoil" or "multicoil"')
self.use_gt = use_gt
self.transform = transform
self.recons_key = 'reconstruction_esc' if challenge == 'singlecoil' else 'reconstruction_rss'
self.examples = list()
files = list(pathlib.Path(root).glob('*.h5'))
if not files: # If the list is empty for any reason
raise FileNotFoundError('Sorry! No files present in this directory. '
'Please check if your disk has been loaded.')
print(f'Initializing {root}. This might take a minute.')
if sample_rate < 1:
random.shuffle(files)
num_files = ceil(len(files) * sample_rate)
files = files[:num_files]
for file_name in sorted(files):
kspace = h5py.File(file_name, mode='r')['kspace']
num_slices = kspace.shape[0]
self.examples += [(file_name, slice_num) for slice_num in range(num_slices)]
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
file_path, slice_num = self.examples[idx]
with h5py.File(file_path, mode='r') as data:
k_slice = data['kspace'][slice_num]
if (self.recons_key in data) and self.use_gt:
target_slice = data[self.recons_key][slice_num]
else:
target_slice = None
return self.transform(k_slice, target_slice, data.attrs, file_path.name, slice_num)
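# --- Illustrative sketch (not part of the original file) --------------------
# A minimal way to wire SliceData into a DataLoader; the directory path and the
# identity transform below are assumptions for demonstration only.
def _example_slice_dataset():
    from torch.utils.data import DataLoader

    def identity_transform(k_slice, target_slice, attrs, file_name, slice_num):
        return k_slice, target_slice, file_name, slice_num

    dataset = SliceData(root='/path/to/multicoil_train',   # assumed location
                        transform=identity_transform,
                        challenge='multicoil',
                        sample_rate=0.5)
    return DataLoader(dataset, batch_size=1, shuffle=True)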
class CustomSliceData(Dataset):
def __init__(self, root, transform, challenge, sample_rate=1, start_slice=0, use_gt=False):
if challenge not in ('singlecoil', 'multicoil'):
raise ValueError('challenge should be either "singlecoil" or "multicoil"')
self.use_gt = use_gt
self.transform = transform
self.recons_key = 'reconstruction_esc' if challenge == 'singlecoil' else 'reconstruction_rss'
self.examples = list()
files = list(pathlib.Path(root).iterdir())
if not files: # If the list is empty for any reason
raise FileNotFoundError('Sorry! No files present in this directory. '
'Please check if your disk has been loaded.')
print(f'Initializing {root}.')
if sample_rate < 1:
random.shuffle(files)
num_files = ceil(len(files) * sample_rate)
files = files[:num_files]
for file_name in sorted(files):
kspace = h5py.File(file_name, mode='r')['kspace']
num_slices = kspace.shape[0]
self.examples += [(file_name, slice_num) for slice_num in range(start_slice, num_slices)]
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
file_path, slice_num = self.examples[idx]
with h5py.File(file_path, mode='r') as data:
attrs = dict(data.attrs)
k_slice = data['kspace'][slice_num]
if (self.recons_key in data) and self.use_gt:
target_slice = data[self.recons_key][slice_num]
else:
target_slice = None
return self.transform(k_slice, target_slice, attrs, file_path.name, slice_num)
| 38.763158
| 105
| 0.614166
|
e84a35d1fb049f779c608e7ad2a3768b21306446
| 1,221
|
py
|
Python
|
todo-app/quarkus-todo-app/docker_build.py
|
rajesh-kumar/spring-native-quarkus
|
f0e097e17a5cc69839982ccfbf46e19378b1e6c9
|
[
"MIT"
] | null | null | null |
todo-app/quarkus-todo-app/docker_build.py
|
rajesh-kumar/spring-native-quarkus
|
f0e097e17a5cc69839982ccfbf46e19378b1e6c9
|
[
"MIT"
] | null | null | null |
todo-app/quarkus-todo-app/docker_build.py
|
rajesh-kumar/spring-native-quarkus
|
f0e097e17a5cc69839982ccfbf46e19378b1e6c9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import re
import subprocess
from pathlib import Path
import docker
def main():
parser = argparse.ArgumentParser(description='This is the docker image builder for quarkus-todo-app')
parser.add_argument("build_type", help="set build type", default='jvm', choices=['jvm', 'native'], nargs='?')
args = parser.parse_args()
print(f'build_type={args.build_type}')
build_type = args.build_type
if args.build_type == 'jvm':
java_version = re.search(r'\"(\d+\.\d+).*\"',
str(subprocess.check_output(['java', '-version'],
stderr=subprocess.STDOUT))).group(1)
if java_version.startswith('11'):
build_type = f'{build_type}11'
source_dir = Path(__file__).parent.resolve()
dockerfile = source_dir / 'src' / 'main' / 'docker' / f'Dockerfile.{build_type}'
print(f'docker_file={dockerfile}')
client = docker.from_env()
client.images.build(path=f'{source_dir}',
dockerfile=dockerfile.resolve(),
tag=f'quarkus-todo-app-{args.build_type}')
if __name__ == '__main__':
main()
| 31.307692
| 113
| 0.597871
|
e6f919a49897af93e38621494d8815a2c2f24671
| 1,456
|
py
|
Python
|
tests/test/plugin/test_coverage.py
|
b1u3h4t/brownie
|
65eefa0da40133687eede2077eaf46d3c93a05c3
|
[
"MIT"
] | null | null | null |
tests/test/plugin/test_coverage.py
|
b1u3h4t/brownie
|
65eefa0da40133687eede2077eaf46d3c93a05c3
|
[
"MIT"
] | 1
|
2021-04-11T13:59:30.000Z
|
2022-02-06T13:45:00.000Z
|
tests/test/plugin/test_coverage.py
|
b1u3h4t/brownie
|
65eefa0da40133687eede2077eaf46d3c93a05c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import json
test_source = """
import pytest
@pytest.mark.no_call_coverage
def test_call_and_transact(BrownieTester, accounts, web3, fn_isolation):
c = accounts[0].deploy(BrownieTester, True)
c.setNum(12, {'from': accounts[0]})
assert web3.eth.blockNumber == 2
c.getTuple(accounts[0])
assert web3.eth.blockNumber == 2
def test_call_and_transact_without_decorator(BrownieTester, accounts, web3, fn_isolation):
c = accounts[0].deploy(BrownieTester, True)
c.setNum(12, {'from': accounts[0]})
assert web3.eth.blockNumber == 2
c.getTuple(accounts[0])
assert web3.eth.blockNumber == 2
"""
def test_always_transact(plugintester, mocker, chain):
mocker.spy(chain, "undo")
# without coverage eval, there should be no calls to `chain.undo`
result = plugintester.runpytest()
result.assert_outcomes(passed=2)
assert chain.undo.call_count == 0
# with coverage eval, only one of the tests should call `chain.undo`
result = plugintester.runpytest("--coverage")
result.assert_outcomes(passed=2)
assert chain.undo.call_count == 1
def test_coverage_tx(json_path, plugintester):
plugintester.runpytest("-n 2")
with json_path.open() as fp:
build = json.load(fp)
assert not len(build["tx"])
plugintester.runpytest("--numprocesses=2", "--coverage")
with json_path.open() as fp:
build = json.load(fp)
assert len(build["tx"]) == 3
| 29.714286
| 90
| 0.692995
|
1e9547b99fbdf2517c5626ee0b2ae9ee4338d0d6
| 7,453
|
py
|
Python
|
delta_node/app/v1/task.py
|
delta-mpc/delta-node
|
674fc61f951e41ed353597f93ca6ea6bc74a102b
|
[
"Apache-2.0"
] | 4
|
2021-07-22T01:11:15.000Z
|
2022-03-17T03:26:20.000Z
|
delta_node/app/v1/task.py
|
delta-mpc/delta-node
|
674fc61f951e41ed353597f93ca6ea6bc74a102b
|
[
"Apache-2.0"
] | 10
|
2021-09-13T09:55:02.000Z
|
2022-03-23T09:41:26.000Z
|
delta_node/app/v1/task.py
|
delta-mpc/delta-node
|
674fc61f951e41ed353597f93ca6ea6bc74a102b
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import math
import os
import shutil
from tempfile import TemporaryFile
from typing import IO, List, Optional
import sqlalchemy as sa
from delta_node import chain, coord, db, entity, pool, registry
from fastapi import (APIRouter, BackgroundTasks, Depends, File, HTTPException,
Query, UploadFile)
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from sqlalchemy.ext.asyncio import AsyncSession
_logger = logging.getLogger(__name__)
task_router = APIRouter(prefix="/task")
def create_task_file(task_file: IO[bytes]):
f = TemporaryFile(mode="w+b")
shutil.copyfileobj(task_file, f)
return f
def move_task_file(task_file: IO[bytes], task_id: str):
task_file.seek(0)
with open(coord.task_config_file(task_id), mode="wb") as f:
shutil.copyfileobj(task_file, f)
task_file.close()
async def run_task(id: int, task_file: IO[bytes]):
node_address = await registry.get_node_address()
async with db.session_scope() as sess:
q = sa.select(entity.Task).where(entity.Task.id == id)
task_item: entity.Task = (await sess.execute(q)).scalar_one()
tx_hash, task_id = await chain.get_client().create_task(
node_address, task_item.dataset, task_item.commitment, task_item.type
)
task_item.task_id = task_id
task_item.creator = node_address
task_item.status = entity.TaskStatus.RUNNING
sess.add(task_item)
await sess.commit()
loop = asyncio.get_running_loop()
await loop.run_in_executor(pool.IO_POOL, move_task_file, task_file, task_id)
_logger.info(
f"[Create Task] create task {task_id}", extra={"task_id": task_id, "tx_hash": tx_hash}
)
await coord.run_task(task_id)
class CreateTaskResp(BaseModel):
task_id: int
@task_router.post("", response_model=CreateTaskResp)
async def create_task(
*,
file: UploadFile = File(...),
session: AsyncSession = Depends(db.get_session),
background: BackgroundTasks,
):
loop = asyncio.get_running_loop()
f = await loop.run_in_executor(pool.IO_POOL, create_task_file, file.file)
task_item = await loop.run_in_executor(pool.IO_POOL, coord.create_task, f)
session.add(task_item)
await session.commit()
await session.refresh(task_item)
background.add_task(run_task, task_item.id, f)
return CreateTaskResp(task_id=task_item.id)
class Task(BaseModel):
id: int
created_at: int
name: str
type: str
creator: str
status: str
@task_router.get("/list", response_model=List[Task])
async def get_task_list(
task_ids: List[int] = Query(...),
session: AsyncSession = Depends(db.get_session),
):
q = sa.select(entity.Task).where(entity.Task.id.in_(task_ids)) # type: ignore
tasks: List[entity.Task] = (await session.execute(q)).scalars().all()
task_dict = {task.id: task for task in tasks}
task_items = []
for task_id in task_ids:
task = task_dict[task_id]
task_items.append(
Task(
id=task.id,
created_at=int(task.created_at.timestamp() * 1000),
name=task.name,
type=task.type,
creator=task.creator,
status=task.status.name,
)
)
return task_items
@task_router.get("/metadata", response_model=Task)
async def get_task_metadata(
task_id: int = Query(..., ge=1),
session: AsyncSession = Depends(db.get_session),
):
q = sa.select(entity.Task).where(entity.Task.id == task_id)
task: Optional[entity.Task] = (await session.execute(q)).scalar_one_or_none()
if task is None:
raise HTTPException(400, f"task {task_id} does not exist")
return Task(
id=task.id,
created_at=int(task.created_at.timestamp() * 1000),
name=task.name,
type=task.type,
creator=task.creator,
status=task.status.name,
)
def task_result_file(task_id: str) -> Optional[str]:
result_filename = coord.task_result_file(task_id)
if os.path.exists(result_filename):
return result_filename
@task_router.get("/result")
async def get_task_result(
task_id: int = Query(..., ge=1), session: AsyncSession = Depends(db.get_session)
):
q = sa.select(entity.Task).where(entity.Task.id == task_id)
task: Optional[entity.Task] = (await session.execute(q)).scalar_one_or_none()
if task is None:
raise HTTPException(400, f"task {task_id} does not exist")
loop = asyncio.get_running_loop()
result_filename = await loop.run_in_executor(
pool.IO_POOL, task_result_file, task.task_id
)
if result_filename is None:
raise HTTPException(400, f"task {task_id} is not finished")
def file_iter(filename: str):
chunk_size = 1024 * 1024
with open(filename, mode="rb") as f:
while True:
content = f.read(chunk_size)
if len(content) == 0:
break
yield content
return StreamingResponse(
file_iter(result_filename),
media_type="application/octet-stream",
headers={"Content-Disposition": f"attachment; filename={task_id}.result"},
)
class TaskLog(BaseModel):
created_at: int
message: str
tx_hash: Optional[str] = None
@task_router.get("/logs", response_model=List[TaskLog])
async def get_task_logs(
task_id: int = Query(..., ge=1),
page: int = Query(1, ge=1),
page_size: int = Query(20, gt=0),
*,
session: AsyncSession = Depends(db.get_session),
):
q = sa.select(entity.Task).where(entity.Task.id == task_id)
task: Optional[entity.Task] = (await session.execute(q)).scalar_one_or_none()
if task is None:
raise HTTPException(400, f"task {task_id} does not exist")
q = (
sa.select(entity.Record)
.where(entity.Record.task_id == task.task_id)
.order_by(entity.Record.id)
.limit(page_size)
.offset((page - 1) * page_size)
)
records: List[entity.Record] = (await session.execute(q)).scalars().all()
logs = [
TaskLog(
created_at=int(record.created_at.timestamp() * 1000),
message=record.message,
tx_hash=record.tx_hash,
)
for record in records
]
return logs
router = APIRouter()
router.include_router(task_router)
class TasksPage(BaseModel):
tasks: List[Task]
total_pages: int
@router.get("/tasks", response_model=TasksPage)
async def get_tasks(
page: int = Query(1, ge=1),
page_size: int = Query(20, gt=0),
session: AsyncSession = Depends(db.get_session),
):
q = (
sa.select(entity.Task)
.order_by(entity.Task.id)
.limit(page_size)
.offset((page - 1) * page_size)
)
tasks: List[entity.Task] = (await session.execute(q)).scalars().all()
q = sa.select(sa.func.count(entity.Task.id))
task_count = (await session.execute(q)).scalar_one()
total_pages = math.ceil(task_count / page_size)
task_items = [
Task(
id=task.id,
created_at=int(task.created_at.timestamp() * 1000),
name=task.name,
type=task.type,
creator=task.creator,
status=task.status.name,
)
for task in tasks
]
tasks_page = TasksPage(tasks=task_items, total_pages=total_pages)
return tasks_page
| 29.458498
| 94
| 0.64994
|
ccf486c696c5c952ddd29cfb6e3287dbfbd4f859
| 77,048
|
py
|
Python
|
tests/test_continuous_futures.py
|
maartenb/zipline-reloaded
|
7cd7ef496dde7b5efe5423ae6c98181a67a99c15
|
[
"Apache-2.0"
] | null | null | null |
tests/test_continuous_futures.py
|
maartenb/zipline-reloaded
|
7cd7ef496dde7b5efe5423ae6c98181a67a99c15
|
[
"Apache-2.0"
] | null | null | null |
tests/test_continuous_futures.py
|
maartenb/zipline-reloaded
|
7cd7ef496dde7b5efe5423ae6c98181a67a99c15
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from functools import partial
from textwrap import dedent
import numpy as np
from numpy.testing import assert_almost_equal
import pandas as pd
from zipline.assets.continuous_futures import OrderedContracts, delivery_predicate
from zipline.assets.roll_finder import (
ROLL_DAYS_FOR_CURRENT_CONTRACT,
VolumeRollFinder,
)
from zipline.data.minute_bars import FUTURES_MINUTES_PER_DAY
from zipline.errors import SymbolNotFound
import zipline.testing.fixtures as zf
import pytest
class ContinuousFuturesTestCase(
zf.WithCreateBarData, zf.WithMakeAlgo, zf.ZiplineTestCase
):
START_DATE = pd.Timestamp("2015-01-05", tz="UTC")
END_DATE = pd.Timestamp("2016-10-19", tz="UTC")
SIM_PARAMS_START = pd.Timestamp("2016-01-26", tz="UTC")
SIM_PARAMS_END = pd.Timestamp("2016-01-28", tz="UTC")
SIM_PARAMS_DATA_FREQUENCY = "minute"
TRADING_CALENDAR_STRS = ("us_futures",)
TRADING_CALENDAR_PRIMARY_CAL = "us_futures"
ASSET_FINDER_FUTURE_CHAIN_PREDICATES = {
"BZ": partial(delivery_predicate, set(["F", "H"])),
}
@classmethod
def make_root_symbols_info(self):
return pd.DataFrame(
{
"root_symbol": ["FOOBAR", "BZ", "MA", "DF"],
"root_symbol_id": [1, 2, 3, 4],
"exchange": ["CMES", "CMES", "CMES", "CMES"],
}
)
@classmethod
def make_futures_info(self):
fo_frame = pd.DataFrame(
{
"symbol": [
"FOOBARF16",
"FOOBARG16",
"FOOBARH16",
"FOOBARJ16",
"FOOBARK16",
"FOOBARF22",
"FOOBARG22",
],
"sid": range(0, 7),
"root_symbol": ["FOOBAR"] * 7,
"asset_name": ["Foo"] * 7,
"start_date": [
pd.Timestamp("2015-01-05", tz="UTC"),
pd.Timestamp("2015-02-05", tz="UTC"),
pd.Timestamp("2015-03-05", tz="UTC"),
pd.Timestamp("2015-04-05", tz="UTC"),
pd.Timestamp("2015-05-05", tz="UTC"),
pd.Timestamp("2021-01-05", tz="UTC"),
pd.Timestamp("2015-01-05", tz="UTC"),
],
"end_date": [
pd.Timestamp("2016-08-19", tz="UTC"),
pd.Timestamp("2016-09-19", tz="UTC"),
pd.Timestamp("2016-10-19", tz="UTC"),
pd.Timestamp("2016-11-19", tz="UTC"),
pd.Timestamp("2022-08-19", tz="UTC"),
pd.Timestamp("2022-09-19", tz="UTC"),
# Set the last contract's end date (which is the last
# date for which there is data) to a value that is
# within the range of the dates being tested. This
# models real-life scenarios where the end date of the
# furthest-out contract is not necessarily the
# greatest end date of all contracts in the chain.
pd.Timestamp("2015-02-05", tz="UTC"),
],
"notice_date": [
pd.Timestamp("2016-01-27", tz="UTC"),
pd.Timestamp("2016-02-26", tz="UTC"),
pd.Timestamp("2016-03-24", tz="UTC"),
pd.Timestamp("2016-04-26", tz="UTC"),
pd.Timestamp("2016-05-26", tz="UTC"),
pd.Timestamp("2022-01-26", tz="UTC"),
pd.Timestamp("2022-02-26", tz="UTC"),
],
"expiration_date": [
pd.Timestamp("2016-01-27", tz="UTC"),
pd.Timestamp("2016-02-26", tz="UTC"),
pd.Timestamp("2016-03-24", tz="UTC"),
pd.Timestamp("2016-04-26", tz="UTC"),
pd.Timestamp("2016-05-26", tz="UTC"),
pd.Timestamp("2022-01-26", tz="UTC"),
pd.Timestamp("2022-02-26", tz="UTC"),
],
"auto_close_date": [
pd.Timestamp("2016-01-27", tz="UTC"),
pd.Timestamp("2016-02-26", tz="UTC"),
pd.Timestamp("2016-03-24", tz="UTC"),
pd.Timestamp("2016-04-26", tz="UTC"),
pd.Timestamp("2016-05-26", tz="UTC"),
pd.Timestamp("2022-01-26", tz="UTC"),
pd.Timestamp("2022-02-26", tz="UTC"),
],
"tick_size": [0.001] * 7,
"multiplier": [1000.0] * 7,
"exchange": ["CMES"] * 7,
}
)
# BZ is set up to test chain predicates, for futures such as PL which
# only use a subset of contracts for the roll chain.
bz_frame = pd.DataFrame(
{
"symbol": ["BZF16", "BZG16", "BZH16"],
"root_symbol": ["BZ"] * 3,
"asset_name": ["Baz"] * 3,
"sid": range(10, 13),
"start_date": [
pd.Timestamp("2005-01-01", tz="UTC"),
pd.Timestamp("2005-01-21", tz="UTC"),
pd.Timestamp("2005-01-21", tz="UTC"),
],
"end_date": [
pd.Timestamp("2016-08-19", tz="UTC"),
pd.Timestamp("2016-11-21", tz="UTC"),
pd.Timestamp("2016-10-19", tz="UTC"),
],
"notice_date": [
pd.Timestamp("2016-01-11", tz="UTC"),
pd.Timestamp("2016-02-08", tz="UTC"),
pd.Timestamp("2016-03-09", tz="UTC"),
],
"expiration_date": [
pd.Timestamp("2016-01-11", tz="UTC"),
pd.Timestamp("2016-02-08", tz="UTC"),
pd.Timestamp("2016-03-09", tz="UTC"),
],
"auto_close_date": [
pd.Timestamp("2016-01-11", tz="UTC"),
pd.Timestamp("2016-02-08", tz="UTC"),
pd.Timestamp("2016-03-09", tz="UTC"),
],
"tick_size": [0.001] * 3,
"multiplier": [1000.0] * 3,
"exchange": ["CMES"] * 3,
}
)
# MA is set up to test a contract which has no active volume.
ma_frame = pd.DataFrame(
{
"symbol": ["MAG16", "MAH16", "MAJ16"],
"root_symbol": ["MA"] * 3,
"asset_name": ["Most Active"] * 3,
"sid": range(14, 17),
"start_date": [
pd.Timestamp("2005-01-01", tz="UTC"),
pd.Timestamp("2005-01-21", tz="UTC"),
pd.Timestamp("2005-01-21", tz="UTC"),
],
"end_date": [
pd.Timestamp("2016-08-19", tz="UTC"),
pd.Timestamp("2016-11-21", tz="UTC"),
pd.Timestamp("2016-10-19", tz="UTC"),
],
"notice_date": [
pd.Timestamp("2016-02-17", tz="UTC"),
pd.Timestamp("2016-03-16", tz="UTC"),
pd.Timestamp("2016-04-13", tz="UTC"),
],
"expiration_date": [
pd.Timestamp("2016-02-17", tz="UTC"),
pd.Timestamp("2016-03-16", tz="UTC"),
pd.Timestamp("2016-04-13", tz="UTC"),
],
"auto_close_date": [
pd.Timestamp("2016-02-17", tz="UTC"),
pd.Timestamp("2016-03-16", tz="UTC"),
pd.Timestamp("2016-04-13", tz="UTC"),
],
"tick_size": [0.001] * 3,
"multiplier": [1000.0] * 3,
"exchange": ["CMES"] * 3,
}
)
# DF is set up to have a double volume flip between the 'F' and 'G'
# contracts, and then a really early temporary volume flip between the
# 'G' and 'H' contracts.
df_frame = pd.DataFrame(
{
"symbol": ["DFF16", "DFG16", "DFH16"],
"root_symbol": ["DF"] * 3,
"asset_name": ["Double Flip"] * 3,
"sid": range(17, 20),
"start_date": [
pd.Timestamp("2005-01-01", tz="UTC"),
pd.Timestamp("2005-02-01", tz="UTC"),
pd.Timestamp("2005-03-01", tz="UTC"),
],
"end_date": [
pd.Timestamp("2016-08-19", tz="UTC"),
pd.Timestamp("2016-09-19", tz="UTC"),
pd.Timestamp("2016-10-19", tz="UTC"),
],
"notice_date": [
pd.Timestamp("2016-02-19", tz="UTC"),
pd.Timestamp("2016-03-18", tz="UTC"),
pd.Timestamp("2016-04-22", tz="UTC"),
],
"expiration_date": [
pd.Timestamp("2016-02-19", tz="UTC"),
pd.Timestamp("2016-03-18", tz="UTC"),
pd.Timestamp("2016-04-22", tz="UTC"),
],
"auto_close_date": [
pd.Timestamp("2016-02-17", tz="UTC"),
pd.Timestamp("2016-03-16", tz="UTC"),
pd.Timestamp("2016-04-20", tz="UTC"),
],
"tick_size": [0.001] * 3,
"multiplier": [1000.0] * 3,
"exchange": ["CMES"] * 3,
}
)
return pd.concat([fo_frame, bz_frame, ma_frame, df_frame])
@classmethod
def make_future_minute_bar_data(cls):
tc = cls.trading_calendar
start = pd.Timestamp("2016-01-26", tz="UTC")
end = pd.Timestamp("2016-04-29", tz="UTC")
dts = tc.minutes_for_sessions_in_range(start, end)
sessions = tc.sessions_in_range(start, end)
# Generate values in the XXY.YYY space, with XX representing the
# session and Y.YYY representing the minute within the session.
# e.g. the close of the 23rd session would be 231.440.
r = 10.0
day_markers = np.repeat(
np.arange(r, r * len(sessions) + r, r), FUTURES_MINUTES_PER_DAY
)
r = 0.001
min_markers = np.tile(
np.arange(r, r * FUTURES_MINUTES_PER_DAY + r, r), len(sessions)
)
markers = day_markers + min_markers
# Volume uses a similar scheme as above but times 1000.
r = 10.0 * 1000
vol_day_markers = np.repeat(
np.arange(r, r * len(sessions) + r, r, dtype=np.int64),
FUTURES_MINUTES_PER_DAY,
)
r = 0.001 * 1000
vol_min_markers = np.tile(
np.arange(r, r * FUTURES_MINUTES_PER_DAY + r, r, dtype=np.int64),
len(sessions),
)
vol_markers = vol_day_markers + vol_min_markers
base_df = pd.DataFrame(
{
"open": np.full(len(dts), 102000.0) + markers,
"high": np.full(len(dts), 109000.0) + markers,
"low": np.full(len(dts), 101000.0) + markers,
"close": np.full(len(dts), 105000.0) + markers,
"volume": np.full(len(dts), 10000, dtype=np.int64) + vol_markers,
},
index=dts,
)
# Add the sid (times 10000) to the ten-thousands place of the prices,
# so that digit can be used to eyeball the source contract.
# For volume roll tests end sid volume early.
# FOOBARF16 cuts out day before autoclose of 01-26
# FOOBARG16 cuts out on autoclose
# FOOBARH16 cuts out 4 days before autoclose
# FOOBARJ16 cuts out 3 days before autoclose
# Make FOOBARG22 have a blip of trading, but not be the most actively
# traded contract, so that it does not participate in volume rolls.
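# Worked example of the encoding above: for sid 1 (FOOBARG16), the close of
# the 2nd session's final minute is 105000 + 1 * 10000 + 2 * 10 + 1440 * 0.001
# = 115021.44, which is the value the daily close assertions elsewhere in this
# test case rely on.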
sid_to_vol_stop_session = {
0: pd.Timestamp("2016-01-26", tz="UTC"),
1: pd.Timestamp("2016-02-26", tz="UTC"),
2: pd.Timestamp("2016-03-18", tz="UTC"),
3: pd.Timestamp("2016-04-20", tz="UTC"),
6: pd.Timestamp("2016-01-27", tz="UTC"),
}
for i in range(20):
df = base_df.copy()
df += i * 10000
if i in sid_to_vol_stop_session:
vol_stop_session = sid_to_vol_stop_session[i]
m_open = tc.open_and_close_for_session(vol_stop_session)[0]
loc = dts.searchsorted(m_open)
# Add a little bit of noise to roll. So that predicates that
# check for exactly 0 do not work, since there may be
# stragglers after a roll.
df.volume.values[loc] = 1000
df.volume.values[loc + 1 :] = 0
j = i - 1
if j in sid_to_vol_stop_session:
non_primary_end = sid_to_vol_stop_session[j]
m_close = tc.open_and_close_for_session(non_primary_end)[1]
if m_close > dts[0]:
loc = dts.get_loc(m_close)
# Add some volume before a roll, since a contract may be
# entered earlier than when it is the primary.
df.volume.values[: loc + 1] = 10
if i == 15: # No volume for MAH16
df.volume.values[:] = 0
if i == 17:
end_loc = dts.searchsorted(pd.Timestamp("2016-02-16 23:00:00+00:00"))
df.volume.values[:end_loc] = 10
df.volume.values[end_loc:] = 0
if i == 18:
cross_loc_1 = dts.searchsorted(
pd.Timestamp("2016-02-09 23:01:00+00:00")
)
cross_loc_2 = dts.searchsorted(
pd.Timestamp("2016-02-11 23:01:00+00:00")
)
cross_loc_3 = dts.searchsorted(
pd.Timestamp("2016-02-15 23:01:00+00:00")
)
end_loc = dts.searchsorted(pd.Timestamp("2016-03-16 23:01:00+00:00"))
df.volume.values[:cross_loc_1] = 5
df.volume.values[cross_loc_1:cross_loc_2] = 15
df.volume.values[cross_loc_2:cross_loc_3] = 5
df.volume.values[cross_loc_3:end_loc] = 15
df.volume.values[end_loc:] = 0
if i == 19:
early_cross_1 = dts.searchsorted(
pd.Timestamp("2016-03-01 23:01:00+00:00")
)
early_cross_2 = dts.searchsorted(
pd.Timestamp("2016-03-03 23:01:00+00:00")
)
end_loc = dts.searchsorted(pd.Timestamp("2016-04-19 23:01:00+00:00"))
df.volume.values[:early_cross_1] = 1
df.volume.values[early_cross_1:early_cross_2] = 20
df.volume.values[early_cross_2:end_loc] = 10
df.volume.values[end_loc:] = 0
yield i, df
def test_double_volume_switch(self):
"""
Test that when a double volume switch occurs we treat the first switch
as the roll, assuming it is within a certain distance of the next auto
close date. See `VolumeRollFinder._active_contract` for a full
explanation and example.
"""
cf = self.asset_finder.create_continuous_future(
"DF",
0,
"volume",
None,
)
sessions = self.trading_calendar.sessions_in_range(
"2016-02-09",
"2016-02-17",
)
for session in sessions:
bar_data = self.create_bardata(lambda: session)
contract = bar_data.current(cf, "contract")
# The 'G' contract surpasses the 'F' contract in volume on
# 2016-02-10, which means that the 'G' contract should become the
# front contract starting on 2016-02-11.
if session < pd.Timestamp("2016-02-11", tz="UTC"):
assert contract.symbol == "DFF16"
else:
assert contract.symbol == "DFG16"
# This test asserts behavior about a back contract briefly spiking in
# volume, but more than a week before the front contract's auto close
# date, meaning it does not fall in the 'grace' period used by
# `VolumeRollFinder._active_contract`. Therefore we should not roll to
# the back contract and the front contract should remain current until
# its auto close date.
sessions = self.trading_calendar.sessions_in_range(
"2016-03-01",
"2016-03-21",
)
for session in sessions:
bar_data = self.create_bardata(lambda: session)
contract = bar_data.current(cf, "contract")
if session < pd.Timestamp("2016-03-17", tz="UTC"):
assert contract.symbol == "DFG16"
else:
assert contract.symbol == "DFH16"
def test_create_continuous_future(self):
cf_primary = self.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
assert cf_primary.root_symbol == "FOOBAR"
assert cf_primary.offset == 0
assert cf_primary.roll_style == "calendar"
assert cf_primary.start_date == pd.Timestamp("2015-01-05", tz="UTC")
assert cf_primary.end_date == pd.Timestamp("2022-09-19", tz="UTC")
retrieved_primary = self.asset_finder.retrieve_asset(cf_primary.sid)
assert retrieved_primary == cf_primary
cf_secondary = self.asset_finder.create_continuous_future(
"FOOBAR", 1, "calendar", None
)
assert cf_secondary.root_symbol == "FOOBAR"
assert cf_secondary.offset == 1
assert cf_secondary.roll_style == "calendar"
assert cf_primary.start_date == pd.Timestamp("2015-01-05", tz="UTC")
assert cf_primary.end_date == pd.Timestamp("2022-09-19", tz="UTC")
retrieved = self.asset_finder.retrieve_asset(cf_secondary.sid)
assert retrieved == cf_secondary
assert cf_primary != cf_secondary
# Assert that the proper exception is raised if the given root symbol
# does not exist.
with pytest.raises(SymbolNotFound):
self.asset_finder.create_continuous_future("NO", 0, "calendar", None)
def test_current_contract(self):
cf_primary = self.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
bar_data = self.create_bardata(lambda: pd.Timestamp("2016-01-26", tz="UTC"))
contract = bar_data.current(cf_primary, "contract")
assert contract.symbol == "FOOBARF16"
bar_data = self.create_bardata(lambda: pd.Timestamp("2016-01-27", tz="UTC"))
contract = bar_data.current(cf_primary, "contract")
assert contract.symbol == "FOOBARG16", (
"Auto close at beginning of session so FOOBARG16 is now "
"the current contract."
)
def test_get_value_contract_daily(self):
cf_primary = self.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
contract = self.data_portal.get_spot_value(
cf_primary,
"contract",
pd.Timestamp("2016-01-26", tz="UTC"),
"daily",
)
assert contract.symbol == "FOOBARF16"
contract = self.data_portal.get_spot_value(
cf_primary,
"contract",
pd.Timestamp("2016-01-27", tz="UTC"),
"daily",
)
assert contract.symbol == "FOOBARG16", (
"Auto close at beginning of session so FOOBARG16 is now "
"the current contract."
)
# Test that the current contract outside of the continuous future's
# start and end dates is None.
contract = self.data_portal.get_spot_value(
cf_primary,
"contract",
self.START_DATE - self.trading_calendar.day,
"daily",
)
assert contract is None
def test_get_value_close_daily(self):
cf_primary = self.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
value = self.data_portal.get_spot_value(
cf_primary,
"close",
pd.Timestamp("2016-01-26", tz="UTC"),
"daily",
)
assert value == 105011.44
value = self.data_portal.get_spot_value(
cf_primary,
"close",
pd.Timestamp("2016-01-27", tz="UTC"),
"daily",
)
assert value == 115021.44, (
"Auto close at beginning of session so FOOBARG16 is now "
"the current contract."
)
# Check a value which occurs after the end date of the last known
# contract, to prevent a regression where the end date of the last
# contract was used instead of the max date of all contracts.
value = self.data_portal.get_spot_value(
cf_primary,
"close",
pd.Timestamp("2016-03-26", tz="UTC"),
"daily",
)
assert value == 135441.44, (
"Value should be for FOOBARJ16, even though last "
"contract ends before query date."
)
def test_current_contract_volume_roll(self):
cf_primary = self.asset_finder.create_continuous_future(
"FOOBAR", 0, "volume", None
)
bar_data = self.create_bardata(lambda: pd.Timestamp("2016-01-26", tz="UTC"))
contract = bar_data.current(cf_primary, "contract")
assert contract.symbol == "FOOBARF16"
bar_data = self.create_bardata(lambda: pd.Timestamp("2016-01-27", tz="UTC"))
contract = bar_data.current(cf_primary, "contract")
assert contract.symbol == "FOOBARG16", (
"Auto close at beginning of session. FOOBARG16 is now "
"the current contract."
)
bar_data = self.create_bardata(lambda: pd.Timestamp("2016-02-29", tz="UTC"))
contract = bar_data.current(cf_primary, "contract")
assert (
contract.symbol == "FOOBARH16"
), "Volume switch to FOOBARH16, should have triggered roll."
def test_current_contract_in_algo(self):
code = dedent(
"""
from zipline.api import (
record,
continuous_future,
schedule_function,
get_datetime,
)
def initialize(algo):
algo.primary_cl = continuous_future('FOOBAR', 0, 'calendar', None)
algo.secondary_cl = continuous_future('FOOBAR', 1, 'calendar', None)
schedule_function(record_current_contract)
def record_current_contract(algo, data):
record(datetime=get_datetime())
record(primary=data.current(algo.primary_cl, 'contract'))
record(secondary=data.current(algo.secondary_cl, 'contract'))
"""
)
results = self.run_algorithm(script=code)
result = results.iloc[0]
assert (
result.primary.symbol == "FOOBARF16"
), "Primary should be FOOBARF16 on first session."
assert (
result.secondary.symbol == "FOOBARG16"
), "Secondary should be FOOBARG16 on first session."
result = results.iloc[1]
# Second day, primary should switch to FOOBARG
assert result.primary.symbol == "FOOBARG16", (
"Primary should be FOOBARG16 on second session, auto "
"close is at beginning of the session."
)
assert result.secondary.symbol == "FOOBARH16", (
"Secondary should be FOOBARH16 on second session, auto "
"close is at beginning of the session."
)
result = results.iloc[2]
# Third session, primary should remain FOOBARG16.
assert (
result.primary.symbol == "FOOBARG16"
), "Primary should remain as FOOBARG16 on third session."
assert (
result.secondary.symbol == "FOOBARH16"
), "Secondary should remain as FOOBARH16 on third session."
def test_current_chain_in_algo(self):
code = dedent(
"""
from zipline.api import (
record,
continuous_future,
schedule_function,
get_datetime,
)
def initialize(algo):
algo.primary_cl = continuous_future('FOOBAR', 0, 'calendar', None)
algo.secondary_cl = continuous_future('FOOBAR', 1, 'calendar', None)
schedule_function(record_current_contract)
def record_current_contract(algo, data):
record(datetime=get_datetime())
primary_chain = data.current_chain(algo.primary_cl)
secondary_chain = data.current_chain(algo.secondary_cl)
record(primary_len=len(primary_chain))
record(primary_first=primary_chain[0].symbol)
record(primary_last=primary_chain[-1].symbol)
record(secondary_len=len(secondary_chain))
record(secondary_first=secondary_chain[0].symbol)
record(secondary_last=secondary_chain[-1].symbol)
"""
)
results = self.run_algorithm(script=code)
result = results.iloc[0]
assert result.primary_len == 6, (
"There should be only 6 contracts in the chain for "
"the primary, there are 7 contracts defined in the "
"fixture, but one has a start after the simulation "
"date."
)
assert result.secondary_len == 5, (
"There should be only 5 contracts in the chain for "
"the primary, there are 7 contracts defined in the "
"fixture, but one has a start after the simulation "
"date. And the first is not included because it is "
"the primary on that date."
)
assert result.primary_first == "FOOBARF16", (
"Front of primary chain should be FOOBARF16 on first " "session."
)
assert result.secondary_first == "FOOBARG16", (
"Front of secondary chain should be FOOBARG16 on first " "session."
)
assert result.primary_last == "FOOBARG22", (
"End of primary chain should be FOOBARK16 on first " "session."
)
assert result.secondary_last == "FOOBARG22", (
"End of secondary chain should be FOOBARK16 on first " "session."
)
# Second day, primary should switch to FOOBARG
result = results.iloc[1]
assert result.primary_len == 5, (
"There should be only 5 contracts in the chain for "
"the primary, there are 7 contracts defined in the "
"fixture, but one has a start after the simulation "
"date. The first is not included because of roll."
)
assert result.secondary_len == 4, (
"There should be only 4 contracts in the chain for "
"the primary, there are 7 contracts defined in the "
"fixture, but one has a start after the simulation "
"date. The first is not included because of roll, "
"the second is the primary on that date."
)
assert result.primary_first == "FOOBARG16", (
"Front of primary chain should be FOOBARG16 on second " "session."
)
assert result.secondary_first == "FOOBARH16", (
"Front of secondary chain should be FOOBARH16 on second " "session."
)
# These values remain FOOBARG22 because fixture data is not exhaustive
# enough to move the end of the chain.
assert result.primary_last == "FOOBARG22", (
"End of primary chain should be FOOBARK16 on second " "session."
)
assert result.secondary_last == "FOOBARG22", (
"End of secondary chain should be FOOBARK16 on second " "session."
)
def test_history_sid_session(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-03-04 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1d",
"sid",
"minute",
)
assert (
window.loc["2016-01-26", cf] == 0
), "Should be FOOBARF16 at beginning of window."
assert (
window.loc["2016-01-27", cf] == 1
), "Should be FOOBARG16 after first roll."
assert (
window.loc["2016-02-25", cf] == 1
), "Should be FOOBARG16 on session before roll."
assert (
window.loc["2016-02-26", cf] == 2
), "Should be FOOBARH16 on session with roll."
assert (
window.loc["2016-02-29", cf] == 2
), "Should be FOOBARH16 on session after roll."
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-04-06 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1d",
"sid",
"minute",
)
assert (
window.loc["2016-02-25", cf] == 1
), "Should be FOOBARG16 at beginning of window."
assert (
window.loc["2016-02-26", cf] == 2
), "Should be FOOBARH16 on session with roll."
assert (
window.loc["2016-02-29", cf] == 2
), "Should be FOOBARH16 on session after roll."
assert (
window.loc["2016-03-24", cf] == 3
), "Should be FOOBARJ16 on session with roll."
assert (
window.loc["2016-03-28", cf] == 3
), "Should be FOOBARJ16 on session after roll."
def test_history_sid_session_delivery_predicate(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"BZ", 0, "calendar", None
)
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-01-11 18:01", tz="US/Eastern").tz_convert("UTC"),
3,
"1d",
"sid",
"minute",
)
assert (
window.loc["2016-01-08", cf] == 10
), "Should be BZF16 at beginning of window."
assert window.loc["2016-01-11", cf] == 12, (
"Should be BZH16 after first roll, having skipped " "over BZG16."
)
assert window.loc["2016-01-12", cf] == 12, "Should have remained BZG16"
def test_history_sid_session_secondary(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 1, "calendar", None
)
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-03-04 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1d",
"sid",
"minute",
)
assert (
window.loc["2016-01-26", cf] == 1
), "Should be FOOBARG16 at beginning of window."
assert (
window.loc["2016-01-27", cf] == 2
), "Should be FOOBARH16 after first roll."
assert (
window.loc["2016-02-25", cf] == 2
), "Should be FOOBARH16 on session before roll."
assert (
window.loc["2016-02-26", cf] == 3
), "Should be FOOBARJ16 on session with roll."
assert (
window.loc["2016-02-29", cf] == 3
), "Should be FOOBARJ16 on session after roll."
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-04-06 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1d",
"sid",
"minute",
)
assert (
window.loc["2016-02-25", cf] == 2
), "Should be FOOBARH16 at beginning of window."
assert (
window.loc["2016-02-26", cf] == 3
), "Should be FOOBARJ16 on session with roll."
assert (
window.loc["2016-02-29", cf] == 3
), "Should be FOOBARJ16 on session after roll."
assert (
window.loc["2016-03-24", cf] == 4
), "Should be FOOBARK16 on session with roll."
assert (
window.loc["2016-03-28", cf] == 4
), "Should be FOOBARK16 on session after roll."
def test_history_sid_session_volume_roll(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "volume", None
)
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-03-04 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1d",
"sid",
"minute",
)
# Volume cuts out for FOOBARF16 on 2016-01-25
assert (
window.loc["2016-01-26", cf] == 0
), "Should be FOOBARF16 at beginning of window."
assert window.loc["2016-01-27", cf] == 1, "Should have rolled to FOOBARG16."
assert (
window.loc["2016-02-26", cf] == 1
), "Should be FOOBARG16 on session before roll."
assert (
window.loc["2016-02-29", cf] == 2
), "Should be FOOBARH16 on session with roll."
assert (
window.loc["2016-03-01", cf] == 2
), "Should be FOOBARH16 on session after roll."
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-04-06 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1d",
"sid",
"minute",
)
assert (
window.loc["2016-02-26", cf] == 1
), "Should be FOOBARG16 at beginning of window."
assert window.loc["2016-02-29", cf] == 2, "Should be FOOBARH16 on roll session."
assert window.loc["2016-03-01", cf] == 2, "Should remain FOOBARH16."
assert (
window.loc["2016-03-17", cf] == 2
), "Should be FOOBARH16 on session before volume cuts out."
assert window.loc["2016-03-18", cf] == 2, (
"Should be FOOBARH16 on session where the volume of "
"FOOBARH16 cuts out, the roll is upcoming."
)
assert window.loc["2016-03-24", cf] == 3, "Should have rolled to FOOBARJ16."
assert window.loc["2016-03-28", cf] == 3, "Should have remained FOOBARJ16."
def test_history_sid_minute(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
window = self.data_portal.get_history_window(
[cf.sid],
pd.Timestamp("2016-01-26 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1m",
"sid",
"minute",
)
assert window.loc[pd.Timestamp("2016-01-26 22:32", tz="UTC"), cf.sid] == 0, (
"Should be FOOBARF16 at beginning of window. A minute "
"which is in the 01-26 session, before the roll."
)
assert (
window.loc[pd.Timestamp("2016-01-26 23:00", tz="UTC"), cf.sid] == 0
), "Should be FOOBARF16 on on minute before roll minute."
assert (
window.loc[pd.Timestamp("2016-01-26 23:01", tz="UTC"), cf.sid] == 1
), "Should be FOOBARG16 on minute after roll."
# Advance the window a day.
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-01-27 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1m",
"sid",
"minute",
)
assert (
window.loc[pd.Timestamp("2016-01-27 22:32", tz="UTC"), cf.sid] == 1
), "Should be FOOBARG16 at beginning of window."
assert (
window.loc[pd.Timestamp("2016-01-27 23:01", tz="UTC"), cf.sid] == 1
), "Should remain FOOBARG16 on next session."
def test_history_close_session(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
window = self.data_portal.get_history_window(
[cf.sid], pd.Timestamp("2016-03-06", tz="UTC"), 30, "1d", "close", "daily"
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-01-26", tz="UTC"), cf.sid],
105011.440,
err_msg="At beginning of window, should be FOOBARG16's first value.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-26", tz="UTC"), cf.sid],
125241.440,
err_msg="On session with roll, should be FOOBARH16's 24th value.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-29", tz="UTC"), cf.sid],
125251.440,
err_msg="After roll, Should be FOOBARH16's 25th value.",
)
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf.sid], pd.Timestamp("2016-04-06", tz="UTC"), 30, "1d", "close", "daily"
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-24", tz="UTC"), cf.sid],
115221.440,
err_msg="At beginning of window, should be FOOBARG16's 22nd value.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-26", tz="UTC"), cf.sid],
125241.440,
err_msg="On session with roll, should be FOOBARH16's 24th value.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-29", tz="UTC"), cf.sid],
125251.440,
err_msg="On session after roll, should be FOOBARH16's 25th value.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-03-24", tz="UTC"), cf.sid],
135431.440,
err_msg="On session with roll, should be FOOBARJ16's 43rd value.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-03-28", tz="UTC"), cf.sid],
135441.440,
err_msg="On session after roll, Should be FOOBARJ16's 44th value.",
)
def test_history_close_session_skip_volume(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"MA", 0, "volume", None
)
window = self.data_portal.get_history_window(
[cf.sid], pd.Timestamp("2016-03-06", tz="UTC"), 30, "1d", "close", "daily"
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-01-26", tz="UTC"), cf.sid],
245011.440,
err_msg="At beginning of window, should be MAG16's first value.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-26", tz="UTC"), cf.sid],
265241.440,
err_msg="Should have skipped MAH16 to MAJ16.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-29", tz="UTC"), cf.sid],
265251.440,
err_msg="Should have remained MAJ16.",
)
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf.sid], pd.Timestamp("2016-04-06", tz="UTC"), 30, "1d", "close", "daily"
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-24", tz="UTC"), cf.sid],
265221.440,
err_msg="Should be MAJ16, having skipped MAH16.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-02-29", tz="UTC"), cf.sid],
265251.440,
err_msg="Should be MAJ1 for rest of window.",
)
assert_almost_equal(
window.loc[pd.Timestamp("2016-03-24", tz="UTC"), cf.sid],
265431.440,
err_msg="Should be MAJ16 for rest of window.",
)
def test_history_close_session_adjusted(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
cf_mul = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", "mul"
)
cf_add = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", "add"
)
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
pd.Timestamp("2016-03-06", tz="UTC"),
30,
"1d",
"close",
"daily",
)
# Unadjusted value is: 115011.44
# Adjustment is based on hop from 115231.44 to 125231.44
# a ratio of ~0.920
assert_almost_equal(
window.loc["2016-01-26", cf_mul],
124992.348,
err_msg="At beginning of window, should be FOOBARG16's first value, "
"adjusted.",
)
# Difference of 7008.561
assert_almost_equal(
window.loc["2016-01-26", cf_add],
125011.44,
err_msg="At beginning of window, should be FOOBARG16's first value, "
"adjusted.",
)
assert_almost_equal(
window.loc["2016-02-26", cf_mul],
125241.440,
err_msg="On session with roll, should be FOOBARH16's 24th value, "
"unadjusted.",
)
assert_almost_equal(
window.loc["2016-02-26", cf_add],
125241.440,
err_msg="On session with roll, should be FOOBARH16's 24th value, "
"unadjusted.",
)
assert_almost_equal(
window.loc["2016-02-29", cf_mul],
125251.440,
err_msg="After roll, Should be FOOBARH16's 25th value, unadjusted.",
)
assert_almost_equal(
window.loc["2016-02-29", cf_add],
125251.440,
err_msg="After roll, Should be FOOBARH16's 25th value, unadjusted.",
)
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
pd.Timestamp("2016-04-06", tz="UTC"),
30,
"1d",
"close",
"daily",
)
# Unadjusted value: 115221.44
# Adjustments based on hops:
# 2016-02-25 00:00:00+00:00
# front 115231.440
# back 125231.440
# ratio: ~0.920
# difference: 10000.0
# and
# 2016-03-23 00:00:00+00:00
# front 125421.440
# back 135421.440
# ratio: ~1.080
# difference: 10000.00
assert_almost_equal(
window.loc["2016-02-24", cf_mul],
135236.905,
err_msg="At beginning of window, should be FOOBARG16's 22nd value, "
"with two adjustments.",
)
assert_almost_equal(
window.loc["2016-02-24", cf_add],
135251.44,
err_msg="At beginning of window, should be FOOBARG16's 22nd value, "
"with two adjustments",
)
# Unadjusted: 125241.44
assert_almost_equal(
window.loc["2016-02-26", cf_mul],
135259.442,
err_msg="On session with roll, should be FOOBARH16's 24th value, "
"with one adjustment.",
)
assert_almost_equal(
window.loc["2016-02-26", cf_add],
135271.44,
err_msg="On session with roll, should be FOOBARH16's 24th value, "
"with one adjustment.",
)
# Unadjusted: 125251.44
assert_almost_equal(
window.loc["2016-02-29", cf_mul],
135270.241,
err_msg="On session after roll, should be FOOBARH16's 25th value, "
"with one adjustment.",
)
assert_almost_equal(
window.loc["2016-02-29", cf_add],
135281.44,
err_msg="On session after roll, should be FOOBARH16's 25th value, "
"unadjusted.",
)
# Unadjusted: 135431.44
assert_almost_equal(
window.loc["2016-03-24", cf_mul],
135431.44,
err_msg="On session with roll, should be FOOBARJ16's 43rd value, "
"unadjusted.",
)
assert_almost_equal(
window.loc["2016-03-24", cf_add],
135431.44,
err_msg="On session with roll, should be FOOBARJ16's 43rd value.",
)
# Unadjusted: 135441.44
assert_almost_equal(
window.loc["2016-03-28", cf_mul],
135441.44,
err_msg="On session after roll, Should be FOOBARJ16's 44th value.",
)
assert_almost_equal(
window.loc["2016-03-28", cf_add],
135441.44,
err_msg="On session after roll, Should be FOOBARJ16's 44th value.",
)
def test_history_close_minute(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
window = self.data_portal.get_history_window(
[cf.sid],
pd.Timestamp("2016-02-25 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1m",
"close",
"minute",
)
assert (
window.loc[pd.Timestamp("2016-02-25 22:32", tz="UTC"), cf.sid] == 115231.412
), (
"Should be FOOBARG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll."
)
assert (
window.loc[pd.Timestamp("2016-02-25 23:00", tz="UTC"), cf.sid] == 115231.440
), "Should be FOOBARG16 on on minute before roll minute."
assert (
window.loc[pd.Timestamp("2016-02-25 23:01", tz="UTC"), cf.sid] == 125240.001
), "Should be FOOBARH16 on minute after roll."
# Advance the window a session.
window = self.data_portal.get_history_window(
[cf],
pd.Timestamp("2016-02-28 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1m",
"close",
"minute",
)
assert (
window.loc["2016-02-26 22:32", cf] == 125241.412
), "Should be FOOBARH16 at beginning of window."
assert (
window.loc["2016-02-28 23:01", cf] == 125250.001
), "Should remain FOOBARH16 on next session."
def test_history_close_minute_adjusted(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", None
)
cf_mul = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", "mul"
)
cf_add = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "calendar", "add"
)
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
pd.Timestamp("2016-02-25 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1m",
"close",
"minute",
)
# Unadjusted: 115231.412
# Adjustment based on roll:
# 2016-02-25 23:00:00+00:00
# front: 115231.440
# back: 125231.440
# Ratio: ~0.920
# Difference: 10000.00
assert window.loc["2016-02-25 22:32", cf_mul] == 125231.41, (
"Should be FOOBARG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll."
)
assert window.loc["2016-02-25 22:32", cf_add] == 125231.412, (
"Should be FOOBARG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll."
)
# Unadjusted: 115231.44
# Should use same ratios as above.
assert window.loc["2016-02-25 23:00", cf_mul] == 125231.44, (
"Should be FOOBARG16 on on minute before roll minute, " "adjusted."
)
assert window.loc["2016-02-25 23:00", cf_add] == 125231.44, (
"Should be FOOBARG16 on on minute before roll minute, " "adjusted."
)
assert (
window.loc["2016-02-25 23:01", cf_mul] == 125240.001
), "Should be FOOBARH16 on minute after roll, unadjusted."
assert (
window.loc["2016-02-25 23:01", cf_add] == 125240.001
), "Should be FOOBARH16 on minute after roll, unadjusted."
# Advance the window a session.
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
pd.Timestamp("2016-02-28 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1m",
"close",
"minute",
)
# No adjustments in this window.
assert (
window.loc["2016-02-26 22:32", cf_mul] == 125241.412
), "Should be FOOBARH16 at beginning of window."
assert (
window.loc["2016-02-28 23:01", cf_mul] == 125250.001
), "Should remain FOOBARH16 on next session."
def test_history_close_minute_adjusted_volume_roll(self):
cf = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "volume", None
)
cf_mul = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "volume", "mul"
)
cf_add = self.data_portal.asset_finder.create_continuous_future(
"FOOBAR", 0, "volume", "add"
)
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
pd.Timestamp("2016-02-28 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1m",
"close",
"minute",
)
# Unadjusted: 115241.412
# Adjustment based on roll:
# 2016-02-25 23:00:00+00:00
# front: 115241.440 (FOG16)
# back: 125241.440 (FOH16)
# Ratio: ~0.920
# Difference: 10000.00
assert window.loc["2016-02-26 22:32", cf_mul] == 125242.973, (
"Should be FOOBARG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll."
)
assert window.loc["2016-02-26 22:32", cf_add] == 125242.851, (
"Should be FOOBARG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll."
)
# Unadjusted: 115231.44
# Should use same ratios as above.
assert window.loc["2016-02-26 23:00", cf_mul] == 125243.004, (
"Should be FOOBARG16 on minute before roll minute, " "adjusted."
)
assert window.loc["2016-02-26 23:00", cf_add] == 125242.879, (
"Should be FOOBARG16 on minute before roll minute, " "adjusted."
)
assert (
window.loc["2016-02-28 23:01", cf_mul] == 125250.001
), "Should be FOOBARH16 on minute after roll, unadjusted."
assert (
window.loc["2016-02-28 23:01", cf_add] == 125250.001
), "Should be FOOBARH16 on minute after roll, unadjusted."
# Advance the window a session.
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
pd.Timestamp("2016-02-29 18:01", tz="US/Eastern").tz_convert("UTC"),
30,
"1m",
"close",
"minute",
)
# No adjustments in this window.
assert (
window.loc["2016-02-29 22:32", cf_mul] == 125251.412
), "Should be FOOBARH16 at beginning of window."
assert (
window.loc["2016-02-29 23:01", cf_mul] == 125260.001
), "Should remain FOOBARH16 on next session."
class RollFinderTestCase(zf.WithBcolzFutureDailyBarReader, zf.ZiplineTestCase):
START_DATE = pd.Timestamp("2017-01-03", tz="UTC")
END_DATE = pd.Timestamp("2017-05-23", tz="UTC")
TRADING_CALENDAR_STRS = ("us_futures",)
TRADING_CALENDAR_PRIMARY_CAL = "us_futures"
@classmethod
def init_class_fixtures(cls):
super(RollFinderTestCase, cls).init_class_fixtures()
cls.volume_roll_finder = VolumeRollFinder(
cls.trading_calendar,
cls.asset_finder,
cls.bcolz_future_daily_bar_reader,
)
@classmethod
def make_futures_info(cls):
day = cls.trading_calendar.day
two_days = 2 * day
end_buffer_days = ROLL_DAYS_FOR_CURRENT_CONTRACT * day
cls.first_end_date = pd.Timestamp("2017-01-20", tz="UTC")
cls.second_end_date = pd.Timestamp("2017-02-17", tz="UTC")
cls.third_end_date = pd.Timestamp("2017-03-17", tz="UTC")
cls.third_auto_close_date = cls.third_end_date - two_days
cls.fourth_start_date = cls.third_auto_close_date - two_days
cls.fourth_end_date = pd.Timestamp("2017-04-17", tz="UTC")
cls.fourth_auto_close_date = cls.fourth_end_date + two_days
cls.fifth_start_date = pd.Timestamp("2017-03-15", tz="UTC")
cls.fifth_end_date = cls.END_DATE
cls.fifth_auto_close_date = cls.fifth_end_date - two_days
cls.last_start_date = cls.fourth_end_date
return pd.DataFrame.from_dict(
{
1000: {
"symbol": "CLF17",
"root_symbol": "CL",
"start_date": cls.START_DATE,
"end_date": cls.first_end_date,
"auto_close_date": cls.first_end_date - two_days,
"exchange": "CMES",
},
1001: {
"symbol": "CLG17",
"root_symbol": "CL",
"start_date": cls.START_DATE,
"end_date": cls.second_end_date,
"auto_close_date": cls.second_end_date - two_days,
"exchange": "CMES",
},
1002: {
"symbol": "CLH17",
"root_symbol": "CL",
"start_date": cls.START_DATE,
"end_date": cls.third_end_date,
"auto_close_date": cls.third_auto_close_date,
"exchange": "CMES",
},
1003: {
"symbol": "CLJ17",
"root_symbol": "CL",
"start_date": cls.fourth_start_date,
"end_date": cls.fourth_end_date,
"auto_close_date": cls.fourth_auto_close_date,
"exchange": "CMES",
},
1004: {
"symbol": "CLK17",
"root_symbol": "CL",
"start_date": cls.fifth_start_date,
"end_date": cls.fifth_end_date,
"auto_close_date": cls.fifth_auto_close_date,
"exchange": "CMES",
},
1005: {
"symbol": "CLM17",
"root_symbol": "CL",
"start_date": cls.last_start_date,
"end_date": cls.END_DATE,
"auto_close_date": cls.END_DATE + two_days,
"exchange": "CMES",
},
1006: {
"symbol": "CLN17",
"root_symbol": "CL",
"start_date": cls.last_start_date,
"end_date": cls.END_DATE,
"auto_close_date": cls.END_DATE + two_days,
"exchange": "CMES",
},
2000: {
# Using a placeholder month of 'A' to mean this is the
# first contract in the chain.
"symbol": "FVA17",
"root_symbol": "FV",
"start_date": cls.START_DATE,
"end_date": cls.END_DATE + end_buffer_days,
"auto_close_date": cls.END_DATE + two_days,
"exchange": "CMES",
},
2001: {
# Using a placeholder month of 'B' to mean this is the
# second contract in the chain.
"symbol": "FVB17",
"root_symbol": "FV",
"start_date": cls.START_DATE,
"end_date": cls.END_DATE + end_buffer_days,
"auto_close_date": cls.END_DATE + end_buffer_days,
"exchange": "CMES",
},
},
orient="index",
)
@classmethod
def make_future_daily_bar_data(cls):
"""
Volume data should look like this:
CLF17 CLG17 CLH17 CLJ17 CLK17 CLM17 CLN17
2017-01-03 2000 1000 5 0 0 0 0
2017-01-04 2000 1000 5 0 0 0 0
...
2017-01-16 2000 1000 5 0 0 0 0
2017-01-17 2000 1000 5 0 0 0 0
ACD -> 2017-01-18 2000_ 1000 5 0 0 0 0
2017-01-19 2000 `-> 1000 5 0 0 0 0
2017-01-20 2000 1000 5 0 0 0 0
2017-01-23 0 1000 5 0 0 0 0
...
2017-02-09 0 1000 5 0 0 0 0
2017-02-10 0 1000_ 5000 0 0 0 0
2017-02-13 0 1000 `-> 5000 0 0 0 0
2017-02-14 0 1000 5000 0 0 0 0
ACD -> 2017-02-15 0 1000 5000 0 0 0 0
2017-02-16 0 1000 5000 0 0 0 0
2017-02-17 0 1000 5000 0 0 0 0
2017-02-20 0 0 5000 0 0 0 0
...
2017-03-10 0 0 5000 0 0 0 0
2017-03-13 0 0 5000 4000 0 0 0
2017-03-14 0 0 5000 4000 0 0 0
ACD -> 2017-03-15 0 0 5000_ 4000 3000 0 0
2017-03-16 0 0 5000 `-> 4000 3000 0 0
2017-03-17 0 0 5000 4000 3000 0 0
2017-03-20 0 0 0 4000 3000 0 0
...
2017-04-14 0 0 0 4000 3000 0 0
2017-04-17 0 0 0 4000_ 3000 0 0
2017-04-18 0 0 0 0 `-> 3000 0 0
ACD -> 2017-04-19 0 0 0 0 3000 1000 2000
2017-04-20 0 0 0 0 3000 1000 2000
2017-04-21 0 0 0 0 3000 1000 2000
...
2017-05-16 0 0 0 0 3000 1000 2000
2017-05-17 0 0 0 0 3000 1000 2000
2017-05-18 0 0 0 0 3000_ 1000 2000
ACD -> 2017-05-19 0 0 0 0 3000 `---1000--> 2000
2017-05-22 0 0 0 0 3000 1000 2000
2017-05-23 0 0 0 0 3000 1000 2000
The first roll occurs because we reach the auto close date of CLF17.
The second roll occurs because the volume of CLH17 overtakes CLG17.
The third roll is testing the fact that CLJ17 has no data in the grace
period before CLH17's auto close date.
The fourth roll is testing that we properly handle the case where a
contract's auto close date is *after* its end date.
The fifth roll occurs on the auto close date of CLK17, but we skip over
CLM17 because of its low volume, and roll directly to CLN17. This is
used to cover an edge case where the window passed to get_rolls ends on
the auto close date of CLK17.
A volume of zero here is used to represent the fact that a contract no
longer exists.
"""
date_index = cls.trading_calendar.sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
def create_contract_data(volume):
# The prices used here are arbitrary as they are irrelevant for the
# purpose of testing roll behavior.
return pd.DataFrame(
{"open": 5, "high": 6, "low": 4, "close": 5, "volume": volume},
index=date_index,
)
# Make a copy because we are taking a slice of a data frame.
first_contract_data = create_contract_data(2000)
yield 1000, first_contract_data.copy().loc[: cls.first_end_date]
# Make a copy because we are taking a slice of a data frame.
second_contract_data = create_contract_data(1000)
yield 1001, second_contract_data.copy().loc[: cls.second_end_date]
third_contract_data = create_contract_data(5)
volume_flip_date = pd.Timestamp("2017-02-10", tz="UTC")
third_contract_data.loc[volume_flip_date:, "volume"] = 5000
yield 1002, third_contract_data
# Make a copy because we are taking a slice of a data frame.
fourth_contract_data = create_contract_data(4000)
yield (
1003,
fourth_contract_data.copy().loc[
cls.fourth_start_date : cls.fourth_end_date
],
)
# Make a copy because we are taking a slice of a data frame.
fifth_contract_data = create_contract_data(3000)
yield 1004, fifth_contract_data.copy().loc[cls.fifth_start_date :]
sixth_contract_data = create_contract_data(1000)
yield 1005, sixth_contract_data.copy().loc[cls.last_start_date :]
seventh_contract_data = create_contract_data(2000)
yield 1006, seventh_contract_data.copy().loc[cls.last_start_date :]
# The data for FV does not really matter except that contract 2000 has
# higher volume than contract 2001.
yield 2000, create_contract_data(200)
yield 2001, create_contract_data(100)
def test_volume_roll(self):
"""
Test normally behaving rolls.
"""
rolls = self.volume_roll_finder.get_rolls(
root_symbol="CL",
start=self.START_DATE + self.trading_calendar.day,
end=self.second_end_date,
offset=0,
)
assert rolls == [
(1000, pd.Timestamp("2017-01-19", tz="UTC")),
(1001, pd.Timestamp("2017-02-13", tz="UTC")),
(1002, None),
]
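# The assertions in these tests read get_rolls' result as a list of
# (sid, roll_date) tuples, where a roll_date of None marks the contract that
# is still active at the end of the requested window.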
def test_no_roll(self):
# If we call 'get_rolls' with start and end dates that do not have any
# rolls between them, we should still expect the last roll date to be
# computed successfully.
date_not_near_roll = pd.Timestamp("2017-02-01", tz="UTC")
rolls = self.volume_roll_finder.get_rolls(
root_symbol="CL",
start=date_not_near_roll,
end=date_not_near_roll + self.trading_calendar.day,
offset=0,
)
assert rolls == [(1001, None)]
def test_roll_in_grace_period(self):
"""
The volume roll finder can look for data up to a week before the given
date. This test asserts that we not only return the correct active
contract during that previous week (grace period), but also that we do
not raise an exception if one of the contracts does not exist.
"""
rolls = self.volume_roll_finder.get_rolls(
root_symbol="CL",
start=self.second_end_date,
end=self.third_end_date,
offset=0,
)
assert rolls == [
(1002, pd.Timestamp("2017-03-16", tz="UTC")),
(1003, None),
]
def test_end_before_auto_close(self):
# Test that we correctly roll from CLJ17 (1003) to CLK17 (1004) even
# though CLJ17 has an auto close date after its end date.
rolls = self.volume_roll_finder.get_rolls(
root_symbol="CL",
start=self.fourth_start_date,
end=self.fourth_auto_close_date,
offset=0,
)
assert rolls == [
(1002, pd.Timestamp("2017-03-16", tz="UTC")),
(1003, pd.Timestamp("2017-04-18", tz="UTC")),
(1004, None),
]
def test_roll_window_ends_on_auto_close(self):
"""
Test that when skipping over a low volume contract (CLM17), we use the
correct roll date for the previous contract (CLK17) when that
contract's auto close date falls on the end date of the roll window.
"""
rolls = self.volume_roll_finder.get_rolls(
root_symbol="CL",
start=self.last_start_date,
end=self.fifth_auto_close_date,
offset=0,
)
assert rolls == [
(1003, pd.Timestamp("2017-04-18", tz="UTC")),
(1004, pd.Timestamp("2017-05-19", tz="UTC")),
(1006, None),
]
def test_get_contract_center(self):
asset_finder = self.asset_finder
get_contract_center = partial(
self.volume_roll_finder.get_contract_center,
offset=0,
)
# Test that the current contract adheres to the rolls.
assert get_contract_center(
"CL", dt=pd.Timestamp("2017-01-18", tz="UTC")
) == asset_finder.retrieve_asset(1000)
assert get_contract_center(
"CL", dt=pd.Timestamp("2017-01-19", tz="UTC")
) == asset_finder.retrieve_asset(1001)
# Test that we still get the correct current contract close to or at
# the max day boundary. Contracts 2000 and 2001 both have auto close
# dates after `self.END_DATE` so 2000 should always be the current
# contract. However, they do not have any volume data after this point
# so this test ensures that we do not fail to calculate the forward
# looking rolls required for `VolumeRollFinder.get_contract_center`.
near_end = self.END_DATE - self.trading_calendar.day
assert get_contract_center("FV", dt=near_end) == asset_finder.retrieve_asset(
2000
)
assert get_contract_center(
"FV", dt=self.END_DATE
) == asset_finder.retrieve_asset(2000)
class OrderedContractsTestCase(zf.WithAssetFinder, zf.ZiplineTestCase):
@classmethod
def make_root_symbols_info(self):
return pd.DataFrame(
{
"root_symbol": ["FOOBAR", "BA", "BZ"],
"root_symbol_id": [1, 2, 3],
"exchange": ["CMES", "CMES", "CMES"],
}
)
@classmethod
def make_futures_info(self):
fo_frame = pd.DataFrame(
{
"root_symbol": ["FOOBAR"] * 4,
"asset_name": ["Foo"] * 4,
"symbol": ["FOOBARF16", "FOOBARG16", "FOOBARH16", "FOOBARJ16"],
"sid": range(1, 5),
"start_date": pd.date_range("2015-01-01", periods=4, tz="UTC"),
"end_date": pd.date_range("2016-01-01", periods=4, tz="UTC"),
"notice_date": pd.date_range("2016-01-01", periods=4, tz="UTC"),
"expiration_date": pd.date_range("2016-01-01", periods=4, tz="UTC"),
"auto_close_date": pd.date_range("2016-01-01", periods=4, tz="UTC"),
"tick_size": [0.001] * 4,
"multiplier": [1000.0] * 4,
"exchange": ["CMES"] * 4,
}
)
# BA is set up to test a quarterly roll, to test Eurodollar-like
# behavior
# The roll should go from BAH16 -> BAM16
ba_frame = pd.DataFrame(
{
"root_symbol": ["BA"] * 3,
"asset_name": ["Bar"] * 3,
"symbol": ["BAF16", "BAG16", "BAH16"],
"sid": range(5, 8),
"start_date": pd.date_range("2015-01-01", periods=3, tz="UTC"),
"end_date": pd.date_range("2016-01-01", periods=3, tz="UTC"),
"notice_date": pd.date_range("2016-01-01", periods=3, tz="UTC"),
"expiration_date": pd.date_range("2016-01-01", periods=3, tz="UTC"),
"auto_close_date": pd.date_range("2016-01-01", periods=3, tz="UTC"),
"tick_size": [0.001] * 3,
"multiplier": [1000.0] * 3,
"exchange": ["CMES"] * 3,
}
)
# BZ is set up to test the case where the first contract in a chain has
# an auto close date before its start date. It also tests the case
# where a contract in the chain has a start date after the auto close
# date of the previous contract, leaving a gap with no active contract.
bz_frame = pd.DataFrame(
{
"root_symbol": ["BZ"] * 4,
"asset_name": ["Baz"] * 4,
"symbol": ["BZF15", "BZG15", "BZH15", "BZJ16"],
"sid": range(8, 12),
"start_date": [
pd.Timestamp("2015-01-02", tz="UTC"),
pd.Timestamp("2015-01-03", tz="UTC"),
pd.Timestamp("2015-02-23", tz="UTC"),
pd.Timestamp("2015-02-24", tz="UTC"),
],
"end_date": pd.date_range(
"2015-02-01",
periods=4,
freq="MS",
tz="UTC",
),
"notice_date": [
pd.Timestamp("2014-12-31", tz="UTC"),
pd.Timestamp("2015-02-18", tz="UTC"),
pd.Timestamp("2015-03-18", tz="UTC"),
pd.Timestamp("2015-04-17", tz="UTC"),
],
"expiration_date": pd.date_range(
"2015-02-01",
periods=4,
freq="MS",
tz="UTC",
),
"auto_close_date": [
pd.Timestamp("2014-12-29", tz="UTC"),
pd.Timestamp("2015-02-16", tz="UTC"),
pd.Timestamp("2015-03-16", tz="UTC"),
pd.Timestamp("2015-04-15", tz="UTC"),
],
"tick_size": [0.001] * 4,
"multiplier": [1000.0] * 4,
"exchange": ["CMES"] * 4,
}
)
return pd.concat([fo_frame, ba_frame, bz_frame])
def test_contract_at_offset(self):
contract_sids = np.array([1, 2, 3, 4], dtype=np.int64)
start_dates = pd.date_range("2015-01-01", periods=4, tz="UTC")
contracts = deque(self.asset_finder.retrieve_all(contract_sids))
oc = OrderedContracts("FOOBAR", contracts)
assert 1 == oc.contract_at_offset(
1, 0, start_dates[-1].value
), "Offset of 0 should return provided sid"
assert 2 == oc.contract_at_offset(
1, 1, start_dates[-1].value
), "Offset of 1 should return next sid in chain."
assert None is oc.contract_at_offset(
4, 1, start_dates[-1].value
), "Offset at end of chain should not crash."
def test_active_chain(self):
contract_sids = np.array([1, 2, 3, 4], dtype=np.int64)
contracts = deque(self.asset_finder.retrieve_all(contract_sids))
oc = OrderedContracts("FOOBAR", contracts)
# Test sid 1 as days increment, as the sessions march forward
# a contract should be added per day, until all defined contracts
# are returned.
chain = oc.active_chain(1, pd.Timestamp("2014-12-31", tz="UTC").value)
assert [] == list(chain), (
"On session before first start date, no contracts "
"in chain should be active."
)
chain = oc.active_chain(1, pd.Timestamp("2015-01-01", tz="UTC").value)
assert [1] == list(chain), (
"[1] should be the active chain on 01-01, since all "
"other start dates occur after 01-01."
)
chain = oc.active_chain(1, pd.Timestamp("2015-01-02", tz="UTC").value)
assert [1, 2] == list(chain), "[1, 2] should be the active contracts on 01-02."
chain = oc.active_chain(1, pd.Timestamp("2015-01-03", tz="UTC").value)
assert [1, 2, 3] == list(
chain
), "[1, 2, 3] should be the active contracts on 01-03."
chain = oc.active_chain(1, pd.Timestamp("2015-01-04", tz="UTC").value)
assert 4 == len(chain), (
"[1, 2, 3, 4] should be the active contracts on "
"01-04, this is all defined contracts in the test "
"case."
)
chain = oc.active_chain(1, pd.Timestamp("2015-01-05", tz="UTC").value)
assert 4 == len(chain), (
"[1, 2, 3, 4] should be the active contracts on "
"01-05. This tests the case where all start dates "
"are before the query date."
)
# Test querying each sid at a time when all should be alive.
chain = oc.active_chain(2, pd.Timestamp("2015-01-05", tz="UTC").value)
assert [2, 3, 4] == list(chain)
chain = oc.active_chain(3, pd.Timestamp("2015-01-05", tz="UTC").value)
assert [3, 4] == list(chain)
chain = oc.active_chain(4, pd.Timestamp("2015-01-05", tz="UTC").value)
assert [4] == list(chain)
# Test defined contract to check edge conditions.
chain = oc.active_chain(4, pd.Timestamp("2015-01-03", tz="UTC").value)
assert [] == list(chain), (
"No contracts should be active, since 01-03 is " "before 4's start date."
)
chain = oc.active_chain(4, pd.Timestamp("2015-01-04", tz="UTC").value)
assert [4] == list(chain), "[4] should be active beginning at its start date."
def test_delivery_predicate(self):
contract_sids = range(5, 8)
contracts = deque(self.asset_finder.retrieve_all(contract_sids))
oc = OrderedContracts(
"BA",
contracts,
chain_predicate=partial(delivery_predicate, set(["F", "H"])),
)
# The chain predicate restricts the BA chain to 'F' and 'H' delivery
# months, so the 'G' contract should be filtered out of the active
# chain.
chain = oc.active_chain(5, pd.Timestamp("2015-01-05", tz="UTC").value)
assert [5, 7] == list(chain), (
"Contract BAG16 (sid=6) should be ommitted from chain, since "
"it does not satisfy the roll predicate."
)
def test_auto_close_before_start(self):
contract_sids = np.array([8, 9, 10, 11], dtype=np.int64)
contracts = self.asset_finder.retrieve_all(contract_sids)
oc = OrderedContracts("BZ", deque(contracts))
# The OrderedContracts chain should omit BZF16 and start with BZG16.
assert oc.start_date == contracts[1].start_date
assert oc.end_date == contracts[-1].end_date
assert oc.contract_before_auto_close(oc.start_date.value) == 9
# The OrderedContracts chain should end on the last contract even
# though there is a gap between the auto close date of BZG16 and the
# start date of BZH16. During this period, BZH16 should be considered
# the center contract, as a placeholder of sorts.
assert oc.contract_before_auto_close(contracts[1].notice_date.value) == 10
assert oc.contract_before_auto_close(contracts[2].start_date.value) == 10
class NoPrefetchContinuousFuturesTestCase(ContinuousFuturesTestCase):
DATA_PORTAL_MINUTE_HISTORY_PREFETCH = 0
DATA_PORTAL_DAILY_HISTORY_PREFETCH = 0
| 38.795569
| 88
| 0.526295
|
0a5599e0e159b2035132bfac2aeb72cc55e4cda1
| 6,130
|
py
|
Python
|
fsdemo/gallery.py
|
dyslab/flask-site-demo
|
6c00a3a7df724d27577f5c57f5f169607e06570d
|
[
"MIT"
] | 1
|
2020-05-25T19:55:07.000Z
|
2020-05-25T19:55:07.000Z
|
fsdemo/gallery.py
|
dyslab/flask-site-demo
|
6c00a3a7df724d27577f5c57f5f169607e06570d
|
[
"MIT"
] | null | null | null |
fsdemo/gallery.py
|
dyslab/flask-site-demo
|
6c00a3a7df724d27577f5c57f5f169607e06570d
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, request, redirect, render_template
from flask import url_for, current_app
from flask_uploads import UploadSet, IMAGES, configure_uploads
from fsdemo.pagedata.gallery import GalleryUploadPageData, GalleryListPageData
from fsdemo.pagedata.gallery import GalleryEditPageData
from fsdemo.pagedata.gallery import GTagsMiddleware, GalleryMiddleware
from fsdemo.response import JsonResponse
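# Blueprint for the photo-gallery demo: HTML pages plus JSON endpoints for
# uploading, tagging, listing, editing, downloading and deleting photos.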
gallery_page = Blueprint(
'gallery',
__name__,
static_folder='static',
template_folder='templates'
)
@gallery_page.route('/', methods=['GET', 'POST'])
def gallery_index():
return redirect(url_for('.gallery_list'))
@gallery_page.route('/upload', methods=['GET', 'POST'])
def gallery_upload():
# print(current_app.config['UPLOADS_DEFAULT_DEST'])
return render_template(
'gallery_item.html',
action='UPLOAD',
pageData=GalleryUploadPageData()
)
@gallery_page.route('/save/tags', methods=['POST'])
def gallery_save_tags():
res = JsonResponse()
try:
tags = request.form.getlist('tags[]')
tags.reverse()
if GTagsMiddleware().save_all(tags):
res.resMsg = 'Note: Tags saved successfully.'
else:
res.resMsg = 'Note: Failed to save tags.'
except Exception:
res.resCode = -1
res.resMsg = "Network error occurred."
pass
return res.outputJsonString()
@gallery_page.route('/do/upload', methods=['POST'])
def gallery_do_upload():
res = JsonResponse()
res.resMsg = 'Upload start.'
if 'photo' in request.files:
try:
photos = UploadSet('photos', IMAGES)
configure_uploads(current_app, (photos))
filename = photos.save(request.files['photo'])
gflag = GalleryMiddleware().save_one(
link=photos.url(filename),
tags=request.form.getlist('tags'),
caption=request.form['caption']
)
if gflag:
res.resMsg = 'Note: Photo saved successfully.'
else:
res.resMsg = 'Note: Failed to save photo.'
res.data = {
'link': photos.url(filename),
'tags': request.form.getlist('tags'),
'caption': request.form['caption']
}
except Exception:
res.resCode = -1
            res.resMsg = 'Error: Upload failed.\n\n' + \
                'The file size may exceed the upload limit of ' + \
                '{0}'.format(current_app.config['MAX_CONTENT_LENGTH_MB']) + \
                ' MB.'
pass
else:
res.resCode = -1
res.resMsg = 'Error: Cannot find the file field in your upload form.'
# print(res.outputJsonString()) # Print for test.
return res.outputJsonString()
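# ---------------------------------------------------------------------------
# Illustrative client sketch (added for clarity; not part of the original
# module).  One way to exercise the upload endpoint above from a script,
# assuming the blueprint is mounted under a hypothetical "/gallery" prefix on
# a server at http://localhost:5000; the field names mirror the handler
# ("photo" file plus "tags" and "caption" form fields).
# ---------------------------------------------------------------------------
def _example_upload_request():  # pragma: no cover - illustration only
    import requests  # assumed to be available; not a dependency of this module

    with open('example.jpg', 'rb') as fh:
        resp = requests.post(
            'http://localhost:5000/gallery/do/upload',
            files={'photo': fh},
            data={'tags': ['travel', 'demo'], 'caption': 'Example photo'},
        )
    # The handler returns the JSON string built by JsonResponse.
    return resp.text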
@gallery_page.route('/show/<path:fullpath>', methods=['GET'])
def gallery_show_uploadfiles(fullpath):
return redirect(url_for('static', filename=fullpath))
@gallery_page.route('/list', methods=['GET', 'POST'])
def gallery_list():
return render_template(
'gallery_list.html',
pageData=GalleryListPageData()
)
@gallery_page.route('/list/photos', methods=['POST'])
def gallery_list_photos():
res = JsonResponse()
try:
keyword = request.form['keyword']
if keyword == 'ALL':
glist = GalleryMiddleware().load_all(
int(request.form['page']),
int(request.form['offset']),
current_app.config['GALLERY_PER_PAGE'],
)
elif keyword == 'TAG':
glist = GalleryMiddleware().load_by_tag(
int(request.form['page']),
int(request.form['offset']),
current_app.config['GALLERY_PER_PAGE'],
request.form['tag']
)
elif keyword == 'YEAR':
glist = GalleryMiddleware().load_by_year(
int(request.form['page']),
int(request.form['offset']),
current_app.config['GALLERY_PER_PAGE'],
int(request.form['year'])
)
else:
glist = None
if glist is not None:
res.resMsg = 'Note: Gallery loaded successfully.'
else:
res.resMsg = 'Note: Failed to load gallery.'
res.data = glist
except Exception:
res.resCode = -1
res.resMsg = "Network error occurred."
pass
return res.outputJsonString()
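# Note (added for clarity; not in the original module): the endpoint above
# expects POST form fields "page", "offset" and "keyword", where "keyword"
# is "ALL", "TAG" (with an extra "tag" field) or "YEAR" (with an extra
# "year" field); the page size comes from the GALLERY_PER_PAGE setting.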
@gallery_page.route('/download/<int:id>', methods=['GET'])
def gallery_download(id):
return GalleryMiddleware().downloadByID(id)
@gallery_page.route('/delete/<int:id>', methods=['GET'])
def gallery_delete(id):
res = JsonResponse()
try:
gflag = GalleryMiddleware().delete_by_id(id)
if gflag:
res.resMsg = 'Note: Deleted photo successfully.'
else:
res.resCode = -1
res.resMsg = 'Note: Failed to delete the photo.'
except Exception:
res.resCode = -1
        res.resMsg = 'Error: Network request failed.' + \
            ' Please check your network connection.'
pass
# print(res.outputJsonString()) # Print for test.
return res.outputJsonString()
@gallery_page.route('/edit/<int:id>', methods=['GET'])
def gallery_edit(id):
return render_template(
'gallery_item.html',
action='EDIT',
pageData=GalleryEditPageData(id)
)
@gallery_page.route('/edit/save/<int:id>', methods=['POST'])
def gallery_edit_save(id):
res = JsonResponse()
try:
bflag = GalleryMiddleware().save_by_id(
id=id,
tags=request.form.getlist('tags'),
caption=request.form['caption']
)
if bflag:
res.resMsg = 'Note: Saved changes successfully.'
else:
res.resMsg = 'Note: Failed to save the changes.'
except Exception:
res.resCode = -1
        res.resMsg = 'Error: Network request failed.' + \
            ' Please check your network connection.'
pass
# print(res.outputJsonString()) # Print for test.
return res.outputJsonString()
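# ---------------------------------------------------------------------------
# Illustrative wiring sketch (added for clarity; not part of the original
# module).  One way this blueprint might be registered in an application
# factory.  The config keys mirror the ones read above (UPLOADS_DEFAULT_DEST,
# MAX_CONTENT_LENGTH_MB, GALLERY_PER_PAGE); the concrete values and the
# "/gallery" URL prefix are assumptions for the example, not taken from the
# real project configuration.
# ---------------------------------------------------------------------------
def _example_create_app():  # pragma: no cover - illustration only
    from flask import Flask

    app = Flask(__name__)
    app.config['UPLOADS_DEFAULT_DEST'] = '/tmp/fsdemo-uploads'
    app.config['MAX_CONTENT_LENGTH_MB'] = 8
    app.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024  # Flask's hard limit
    app.config['GALLERY_PER_PAGE'] = 12
    app.register_blueprint(gallery_page, url_prefix='/gallery')
    return app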
| 31.761658
| 78
| 0.59217
|