from __future__ import print_function, division
import traceback
__author__ = 'panzer'
class RuntimeException(Exception):
def __init__(self, message):
super(RuntimeException, self).__init__(message)
self.name = RuntimeException.__name__
@staticmethod
def print_trace():
traceback.print_exc()
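# Example usage (illustrative sketch, not part of the original module):
#   try:
#       raise RuntimeException("something went wrong")
#   except RuntimeException as e:
#       print(e.name)            # -> 'RuntimeException'
#       RuntimeException.print_trace()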
|
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceZoneRemote(RemoteModel):
"""
Zones defined on a traffic filtering device. On devices that do not natively support zones (e.g., Cisco routers), there is one zone per interface, plus an additional 'internal' zone.
| ``DeviceZoneID:`` The internal NetMRI identifier for this filtering zone.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device to which this zone belongs.
| ``attribute type:`` number
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
    | ``ZoneFirstSeenTime:`` The timestamp of when NetMRI first discovered this zone.
| ``attribute type:`` datetime
| ``ZoneStartTime:`` The starting effective time of this record.
| ``attribute type:`` datetime
| ``ZoneEndTime:`` The ending effective time of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``ZoneTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``ZoneChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
| ``ZoneName:`` The name of the zone.
| ``attribute type:`` string
| ``ZoneProvisionData:`` Internal data - do not modify, may change without warning.
| ``attribute type:`` string
| ``ZoneInterfaceCount:`` The total number of interfaces connected to this zone.
| ``attribute type:`` number
| ``ZoneInterfaceID:`` The internal NetMRI identifier of that interface, if only one interface connected to this zone.
| ``attribute type:`` number
| ``ZoneArtificialInd:`` A flag indicating that this zone has no counterpart in the device configuration.
| ``attribute type:`` bool
"""
properties = ("DeviceZoneID",
"DeviceID",
"DataSourceID",
"ZoneFirstSeenTime",
"ZoneStartTime",
"ZoneEndTime",
"ZoneTimestamp",
"ZoneChangedCols",
"ZoneName",
"ZoneProvisionData",
"ZoneInterfaceCount",
"ZoneInterfaceID",
"ZoneArtificialInd",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceZoneID": self.DeviceZoneID })
@property
@check_api_availability
def zone_interface(self):
"""
The Interface linked to this zone (if only one interface linked to this zone)
``attribute type:`` model
"""
return self.broker.zone_interface(**{"DeviceZoneID": self.DeviceZoneID })
@property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.device(**{"DeviceZoneID": self.DeviceZoneID })
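# Rough usage sketch (illustrative only; the client/broker calls below are assumed
# from infoblox_netmri client conventions, so verify them against the installed
# package version before relying on them):
#   from infoblox_netmri.client import InfobloxNetMRI
#   client = InfobloxNetMRI("netmri.example.com", "admin", "password")
#   broker = client.get_broker("DeviceZone")
#   for zone in broker.index(DeviceID=123):
#       print(zone.ZoneName, zone.ZoneInterfaceCount)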
|
if __name__ == '__main__':
import sys
import pydart2 as pydart
if len(sys.argv) != 2:
print("Usage: view_skeleton.py [*.urdf/*.sdf]")
exit(0)
skel_path = sys.argv[1]
print("skeleton path = %s" % skel_path)
pydart.init()
print("Pydart init OK")
world = pydart.World(0.0005)
print("World init OK")
skel = world.add_skeleton(skel_path)
print("Skeleton add OK")
print("Camera:")
print(" drag: rotate camera")
print(" shift-drag: zoom camera")
print(" control-drag: translate camera")
pydart.gui.viewer.launch(world)
|
#!/usr/bin/env python3
import numpy as np
import scipy.io as sio
import sklearn.feature_extraction
from pathlib import Path
import argparse
parser = argparse.ArgumentParser(description='Sample patches from figures')
parser.add_argument('--experimentName', default="naturalImages",
help='the folder name to use for the data')
args = parser.parse_args()
experiment_name = args.experimentName
# The natural-image dataset from Olshausen & Field (Nature, 1996) consists of
# ten 512x512 images, from which 16x16 pixel patches are sampled
large_images = sio.loadmat("%s/IMAGES.mat" % experiment_name)['IMAGES']
# the images are not in the [0, 1] range, so rescale
# them first
minValue = np.min(large_images)
maxValue = np.max(large_images)
newmin = 0.0
newmax = 1.0
large_images = (large_images-minValue) / (maxValue - minValue) * (newmax - newmin) + newmin
def sample_patches(large_images, num_patches_per_image = 10000):
num_images = large_images.shape[-1]
num_patches = num_patches_per_image * num_images
patches = np.zeros((num_patches, 1, 16, 16))
for i in range(num_images):
patches[(i * num_patches_per_image):((i+1) * num_patches_per_image),0,:,:] = sklearn.feature_extraction.image.extract_patches_2d(large_images[:,:,i], (16,16), max_patches = num_patches_per_image)
return patches
train_patches = sample_patches(large_images)
test_patches = sample_patches(large_images, num_patches_per_image = 1000)
save_folder = "%s" % experiment_name
Path(save_folder + "/train").mkdir(parents=True, exist_ok=True)
np.savez_compressed(save_folder + "/train/data.npz", images=train_patches)
Path(save_folder + "/test").mkdir(parents=True, exist_ok=True)
np.savez_compressed(save_folder + "/test/data.npz", images=test_patches)
|
import uuid
from django.db import models
class News(models.Model):
"""
News item contains the necessary information to read out the news.
"""
# External ID, might be textual or number.
external_id = models.CharField(max_length=128)
uuid = models.UUIDField(default=uuid.uuid4)
# Main title for the news
title = models.CharField(max_length=256, default="")
# Body/content of the news
content = models.TextField(default="")
# Url to an audio file that contains the news
audio_url = models.URLField(default="", blank=True)
# Timestamps
created = models.DateTimeField()
modified = models.DateTimeField(auto_now=True)
def __str__(self):
return "{} ({})".format(self.external_id, self.created)
|
__author__ = "Chris Brannon <chris@the-brannons.com>"
__copyright__ = "See the file LICENSE for copyright and license info"
__license__ = "See the file LICENSE for copyright and license info"
__version__ = "0.3"
__email__ = "chris@the-brannons.com"
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from collections import defaultdict as dd
import argparse
import gzip
import util
import sys
import os
import logging
import shutil
import struct
import subprocess
import tempfile
import bz2file
def compress(source, target, reference, nointra, delta, inner, pagesize=4096):
# some info
logging.debug("Starting compression of %s to %s", repr(source), repr(target))
logging.debug("Page size: %d", pagesize)
logging.debug("Reference dump: %s", reference)
# pages + page numbers bookkeeping
reference_pages, reference_pagenrs = [], {}
for i, page in enumerate(util.get_pages(reference)):
reference_pages.append(page)
if page not in reference_pagenrs:
reference_pagenrs[page] = i
reference_pages_set = set(reference_pages)
# find new + duplicatable pages
dedups = dd(list)
diffs = dd()
diff_seen = set()
if nointra:
new_pagenrs = []
else:
new_pagenrs = dd(list)
new_pages = []
same_distinct, same_total = set(), 0
source_pages = []
for i, page in enumerate(util.get_pages(source)):
source_pages.append(page)
if reference_pages[i] != page:
if page not in reference_pages_set:
if delta is not None:
d = util.create_diff(reference_pages[i], page)
if d is not None:
diff_seen.add(page)
diffs[i] = d
continue
if nointra:
new_pagenrs.append(i)
else:
new_pagenrs[page].append(i)
new_pages.append(page)
else:
dedups[page].append(i)
else:
same_total += 1
same_distinct.add(page)
source_pages_set = set(source_pages)
newpagescnt = len(new_pages), len(set(new_pages))
# intervalize
if nointra:
new_pagenrs = util.intervalize(new_pagenrs)
else:
new_pagenrs = {page: util.intervalize(new_pagenrs[page]) for page in new_pagenrs}
dedups = {page: util.intervalize(dedups[page]) for page in dedups}
# write file
util.create_dir(".tmp")
tmphandle, tmpfile = tempfile.mkstemp(dir=".tmp")
try:
with open(tmpfile, "wb") as ftmp:
ftmp.write(reference + "\x00")
inorder = []
seen = set()
for page in reference_pages:
if page in dedups and page not in seen:
inorder.append(page)
seen.add(page)
util.create_pagenr_list([reference_pagenrs[page] for page in inorder], ftmp)
for page in inorder:
ftmp.write(util.create_interval_list(dedups[page]))
if delta is not None:
util.create_pagenr_list(sorted(diffs), ftmp)
for pagenr in sorted(diffs):
ftmp.write(diffs[pagenr])
if nointra:
ftmp.write(util.create_interval_list(new_pagenrs))
for page in new_pages:
ftmp.write(page)
else:
ftmp.write(struct.pack("<I", len(new_pagenrs)))
for page in new_pagenrs:
ftmp.write(util.create_interval_list(new_pagenrs[page]))
for page in new_pagenrs:
ftmp.write(page)
with open(tmpfile, "rb") as ftmp, open(target, "wb") as ftarget:
ftarget.write(util.create_header(create_method_name(nointra, delta, inner), os.path.getsize(source)))
ftarget.flush()
if inner is None:
shutil.copyfileobj(ftmp, ftarget)
elif inner == "gzip":
with gzip.GzipFile(fileobj=ftarget, mode="wb", compresslevel=9) as ftarget:
shutil.copyfileobj(ftmp, ftarget)
elif inner == "bzip2":
with bz2file.BZ2File(filename=ftarget, mode="wb", compresslevel=9) as ftarget:
shutil.copyfileobj(ftmp, ftarget)
elif inner == "7zip":
p = subprocess.Popen(["7za", "a", "-an", "-txz", "-mx=9", "-si", "-so", source], stdin=ftmp, stdout=ftarget, stderr=subprocess.PIPE)
p.communicate()
finally:
os.close(tmphandle)
os.remove(tmpfile)
# some info
dedup_distinct = len(set(dedups.keys()) | same_distinct)
dedup_total = same_total + sum(b - a + 1 for l in dedups.values() for a, b in l)
logging.debug("Deduplicated pages at the same offset: %d/%d (%d/%d)", same_total, len(source_pages), len(same_distinct), len(source_pages_set))
logging.debug("Deduplicated pages at different offsets: %d/%d (%d/%d)", dedup_total - same_total, len(source_pages), len(dedups), len(source_pages_set))
logging.debug("Deduplicated pages in total: %d/%d (%d/%d)", dedup_total, len(source_pages), dedup_distinct, len(source_pages_set))
if delta is not None:
logging.debug("Diffed pages: %d/%d (%d/%d)", len(diffs), len(source_pages), len(diff_seen), len(source_pages_set))
logging.debug("New pages: %d/%d (%d/%d)", newpagescnt[0], len(source_pages), newpagescnt[1], len(source_pages_set))
logging.debug("Done")
return 0
def decompress(source, target):
# some info
logging.debug("Starting decompression of %s to %s", repr(source), repr(target))
with open(source, "rb") as fsource:
# some info
logging.debug("Parsing header")
magic, method, majorversion, minorversion, pagesize, uncompressed_size = util.parse_header(fsource)
logging.debug(" Magic number: %s", repr(magic))
logging.debug(" Method: %s", repr(method))
logging.debug(" Major version number: %d", majorversion)
logging.debug(" Minor version number: %d", minorversion)
logging.debug(" Page size: %d", pagesize)
logging.debug(" Uncompressed size: %d", uncompressed_size)
nointra, delta, inner = parse_method_name(method)
fsource.flush()
tmphandle, tmpfile = None, None
if inner == "gzip":
fsource = gzip.GzipFile(fileobj=fsource, mode="rb", compresslevel=9)
elif inner == "bzip2":
fsource = bz2file.BZ2File(filename=fsource, mode="rb", compresslevel=9)
elif inner == "7zip":
util.create_dir(".tmp")
tmphandle, tmpfile = tempfile.mkstemp(dir=".tmp")
with open(tmpfile, "wb") as ftmp:
p = subprocess.Popen(["7za", "x", "-txz", "-si", "-so"], stdin=fsource, stdout=ftmp, stderr=subprocess.PIPE)
p.communicate()
fsource = open(tmpfile, "rb")
try:
reference = util.parse_string(fsource)
logging.debug("Reference dump: %s", reference)
# parse deduplicated pages
fills = {}
reference_list = list(util.parse_pagenr_list(fsource))
for i in xrange(len(reference_list)):
for left, right in util.parse_interval_list(fsource):
for pagenr in xrange(left, right+1):
fills[pagenr] = reference_list[i]
# parse diffs
if delta:
diffs = {}
pagenrs = list(util.parse_pagenr_list(fsource))
for i in xrange(len(pagenrs)):
diffs[pagenrs[i]] = util.parse_diff(fsource, pagesize)
# parse new pages
newpages = {}
newdistinct = set()
if nointra:
for left, right in list(util.parse_interval_list(fsource)):
for j in xrange(left, right + 1):
page = fsource.read(pagesize)
newdistinct.add(page)
newpages[j] = page
else:
newcnt = util.parse_int(fsource, 4)
intervals = []
for _ in xrange(newcnt):
intervals.append(list(util.parse_interval_list(fsource)))
for i in xrange(newcnt):
page = fsource.read(pagesize)
for left, right in intervals[i]:
for j in xrange(left, right + 1):
newdistinct.add(page)
newpages[j] = page
finally:
if tmphandle is not None:
os.close(tmphandle)
os.remove(tmpfile)
if inner is not None:
fsource.close()
# reconstruct file
pagenr = 0
final = uncompressed_size / pagesize
same_distinct, same_total = set(), 0
different_distinct, different_total = set(), 0
seen = set()
diff_seen = set()
with open(reference, "rb") as freference:
with open(target, "wb") as ftarget:
while pagenr < final:
if pagenr in fills:
freference.seek(pagesize * fills[pagenr])
page = freference.read(pagesize)
seen.add(page)
different_distinct.add(page)
different_total += 1
ftarget.write(page)
elif delta and pagenr in diffs:
freference.seek(pagenr * pagesize)
page = freference.read(pagesize)
newpage = util.apply_diff(page, diffs[pagenr])
diff_seen.add(newpage)
ftarget.write(newpage)
elif pagenr in newpages:
seen.add(newpages[pagenr])
ftarget.write(newpages[pagenr])
else:
freference.seek(pagesize * pagenr)
page = freference.read(pagesize)
seen.add(page)
same_distinct.add(page)
same_total += 1
ftarget.write(page)
pagenr += 1
# some info
logging.debug("New pages: %d/%d (%d/%d)", len(newpages), final, len(newdistinct), len(seen))
logging.debug("Deduplicated pages at the same offset: %d/%d (%d/%d)", same_total, final, len(same_distinct), len(seen))
logging.debug("Deduplicated pages at different offsets: %d/%d (%d/%d)", len(fills), final, len(different_distinct), len(seen))
logging.debug("Deduplicated pages in total: %d/%d (%d/%d)", same_total + len(fills), final, len(same_distinct | different_distinct), len(seen))
if delta:
logging.debug("Diffed pages: %d/%d (%d/%d)", len(diffs), final, len(diff_seen), len(seen))
logging.debug("Done")
return 0
def create_method_name(nointra, delta, inner):
nointra = "nointra" if nointra else ""
delta = "delta" if delta is not None else ""
inner = inner if inner is not None else ""
return "interdedup{}{}{}".format(nointra, delta, inner)
def parse_method_name(s):
assert s.startswith("interdedup")
s = s[len("interdedup"):]
delta = False
nointra = False
    if s.startswith("nointra"):
        nointra = True
        s = s[len("nointra"):]
if "delta" in s:
delta = True
s = s[s.index("delta") + len("delta"):]
inner = s if len(s) > 1 else None
return nointra, delta, inner
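# Round trip of the method-name encoding above (for illustration):
#   create_method_name(True, "xdelta", "gzip")   -> "interdedupnointradeltagzip"
#   parse_method_name("interdedupnointradeltagzip") -> (True, True, "gzip")
# Note that only the *presence* of a delta setting is encoded, not its value.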
def main():
# cli args checking
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="action")
parser_c = subparsers.add_parser("c")
parser_c.add_argument("source", type=str)
parser_c.add_argument("target", type=str)
parser_c.add_argument("reference", type=str)
parser_c.add_argument("--inner", type=str)
parser_c.add_argument("--delta", type=str)
parser_c.add_argument("--nointra", action="store_true")
parser_d = subparsers.add_parser("d")
parser_d.add_argument("source", type=str)
parser_d.add_argument("target", type=str)
args = parser.parse_args()
# create method name
if args.action == "c":
method = create_method_name(args.nointra, args.delta, args.inner)
elif args.action == "d":
with open(args.source, "rb") as f:
method = util.parse_header(f)[1]
# set up logging
util.create_dir("logs")
util.configure_logging(method, "logs/{}.log".format(method))
# check if files do (not) exist
if not os.path.isfile(args.source):
logging.error("Source %s does not exist", repr(args.source))
return -1
if os.path.isfile(args.target) and os.path.getsize(args.target) > 0:
logging.error("Target %s already exists and is non-empty", repr(args.target))
return -1
if args.action == "c" and not os.path.isfile(args.reference):
logging.error("Reference %s does not exist", repr(args.reference))
return -1
# compress/decompress
if args.action == "c":
return compress(args.source, args.target, args.reference, args.nointra, args.delta, args.inner)
elif args.action == "d":
return decompress(args.source, args.target)
if __name__ == "__main__":
sys.exit(main())
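# Example invocations (illustrative; "interdedup.py" stands in for whatever this
# script is named in your checkout, and any non-empty --delta value enables diffing):
#   python2.7 interdedup.py c dump.raw dump.compressed reference.raw --inner gzip --delta 1
#   python2.7 interdedup.py d dump.compressed dump.restored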
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
# Written by Ross Girshick
"""This file contain the utils class that can
improve programming efficiency on IPU by popart.
"""
import builtins
import string
import numpy as np
import popart
from _globals import (
    GLOBAL_V, set_batch, get_batch_size, get_anchor_return_type, train_mode_on,
    train_mode, safe_mode, safe_mode_on, safe_mode_off, get_builder, set_builder,
    set_seed, get_seed, set_options, get_options, set_device, get_device_type,
    get_ai_onnx_version, set_memory_proportion, get_memory_proportion,
    enable_global_initializer, get_global_initializer, get_exclude_weights,
    set_exclude_weights, get_all_trainable_weights, load_model, set_load_strict,
    load_strict, set_weight_fp16, get_weight_fp16, get_all_tensors_info)
CONSTANT_COUNTER = [0]
TENSOR_NAMES = []
def unified_op_warpper(func):
def warpper_func(*args, **kwargs):
results = func(*args, **kwargs)
return results
return warpper_func
def name_scope(scope_name):
return get_builder().nameScope(scope_name)
def add_input_tensor(dtype, shape, debugContext=''):
input_t = get_builder().addInputTensor(popart.TensorInfo(dtype, shape),
debugContext=debugContext)
return TTensor(input_t)
def identity(input, debugContext=''):
return TTensor(get_builder().aiOnnx.identity([input.getIpuIndex()],
debugContext=debugContext))
def pad(data, pads, mode='constant', constant_value=0, debugContext=''):
constant_value = constant(constant_value).cast(data.dtype.upper())
pads = to_tensor(pads).cast('INT64').flatten()
result = get_builder().aiOnnx.pad(
[data.getIpuIndex(),
pads.getIpuIndex(),
constant_value.getIpuIndex()],
mode=mode,
debugContext=debugContext)
return TTensor(result)
def _conv2d(input,
filter,
bias=False,
strides=[1, 1],
pads=[1, 1, 1, 1],
dilations=[1, 1],
group=1,
debugContext=''):
"""Encapsulation of function get_builder().aiOnnx.conv!
args:
x: input tensor
ksize: int,kernel size
stride: int,stride of conv
pads: int, conv padding
c_out: int, output channel
group: int, conv group nums,default:1
"""
args = [input.getIpuIndex(), filter.getIpuIndex()]
if bias:
args.append(bias.getIpuIndex())
output = get_builder().aiOnnx.conv(args,
strides=strides,
pads=pads,
dilations=dilations,
group=group,
debugContext=debugContext)
if get_memory_proportion() is not None:
get_builder().setAvailableMemoryProportion(output,
get_memory_proportion())
return TTensor(output)
def relu(x, debugContext=""):
"""
args:
x: input tensor
"""
if isinstance(x, list):
x = [ele.getIpuIndex() for ele in x]
else:
x = [x.getIpuIndex()]
x = get_builder().aiOnnx.relu(x, debugContext=debugContext)
return TTensor(x)
def maxPooling(x,
strides=[2, 2],
kernel_size=[2, 2],
padding=[0, 0, 0, 0],
dilations=[1, 1],
ceil_mode=0,
debugContext=""):
"""
args:
x:
strides: maxpool stride(output_size=input_size/strides)
kernel_size: window's size that used to find max value
"""
x = get_builder().aiOnnx.maxpool(args=[x.getIpuIndex()],
num_outputs=1,
kernel_shape=kernel_size,
pads=padding,
strides=strides,
dilations=dilations,
ceil_mode=ceil_mode,
debugContext=debugContext)
return TTensor(x[0])
def avgPooling(x,
strides=2,
kernel_size=2,
padding=0,
count_include_pad=0,
debugContext=""):
x = get_builder().aiOnnx.averagepool(
[x.getIpuIndex()],
kernel_shape=[kernel_size, kernel_size],
count_include_pad=count_include_pad,
pads=[padding] * 4,
strides=[strides, strides],
debugContext=debugContext)
return TTensor(x)
def check_all_constant(tensors):
for t in tensors:
if isinstance(t, ConstantTensor) is False:
return False
return True
def resize(x,
roi=None,
scales=None,
sizes=None,
coordinate_transformation_mode='half_pixel',
cubic_coeff_a=-0.75,
exclude_outside=0,
extrapolation_value=0.0,
mode='nearest',
nearest_mode='round_prefer_floor',
debugContext=''):
# TODO Check whether each parameter is correct
# x:N-D tensor
# roi: 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X
# scales: tensor(float),The scale array along each dimension.
# sizes: tensor(int64),The size of the output tensor.
# Only one of 'scales' and 'sizes' can be specified.
    # Only one of 'scales' and 'sizes' may be specified.
    assert (scales is None) != (sizes is None)
    if roi is None:
        # roi is only meaningful with 'tf_crop_and_resize'; for the supported
        # modes a placeholder roi tensor is built below.
        assert coordinate_transformation_mode != 'tf_crop_and_resize'
    else:
        raise NotImplementedError("explicit 'roi' input is not supported yet")
    roi = constant(
        np.array([0, -1] * x.shape.ndims).astype(
            mappin_gc2npy[x.dtype])) if roi is None else roi
    scales = constant(np.array(
        [1.0] * x.shape.ndims).astype(np.float32)) if scales is None else scales
    sizes = constant(np.array(
        [1] * x.shape.ndims).astype(np.int64)) if sizes is None else sizes
inputs_list = [
x.getIpuIndex(),
roi.getIpuIndex(),
scales.getIpuIndex(),
sizes.getIpuIndex()
]
inputs_dic = {
'coordinate_transformation_mode': coordinate_transformation_mode,
'cubic_coeff_a': cubic_coeff_a,
'exclude_outside': exclude_outside,
'extrapolation_value': extrapolation_value,
'mode': mode,
'nearest_mode': nearest_mode,
'debugContext': debugContext
}
result = TTensor(get_builder().aiOnnx.resize(inputs_list, **inputs_dic))
return result
def matmul(x, y, debugContext=""):
if check_all_constant([x, y]):
# degrade to np op
result = np.matmul(x.data, y.data)
return constant(result)
else:
assert x.dtype in ['FLOAT', "FLOAT16"]
assert y.dtype in ['FLOAT', "FLOAT16"]
return TTensor(get_builder().aiOnnx.matmul(
[x.getIpuIndex(), y.getIpuIndex()], debugContext=debugContext))
def scalarTensor2int(t):
if isinstance(t, ConstantTensor):
return int(t.data)
assert isinstance(t, int)
return t
def reshape(source, target_shape, debugContext=""):
"""
args:
source : tensor name
target_shape: list of int e.g.: [3,4,5,6]
"""
if isinstance(target_shape, TTensor):
target_shape = target_shape.data
if isinstance(target_shape, np.ndarray):
target_shape = target_shape.tolist()
if isinstance(target_shape, list):
target_shape = [scalarTensor2int(ele) for ele in target_shape]
target_shape = constant(np.array(target_shape).astype(np.int64),
debugContext=debugContext)
if check_all_constant([source, target_shape]):
# degrade to np op
result = source.data.reshape(target_shape.data)
result = constant(result)
return result
else:
return TTensor(get_builder().aiOnnx.reshape(
[source.getIpuIndex(),
target_shape.getIpuIndex()],
debugContext=debugContext))
def softmax_2d(x, axis=1, debugContext=""):
assert axis in [-1, 1]
assert x.shape.ndims == 2
x = get_builder().aiOnnx.softmax(
[x.getIpuIndex()], axis=axis,
debugContext=debugContext)
return TTensor(x)
def _batchNorm(
x,
scale,
biases,
mean,
var,
num_outputs=1,
momentum=0.9,
epsilon=1e-5,
debugContext="",
):
results = get_builder().aiOnnx.batchnormalization(
[
x.getIpuIndex(),
scale.getIpuIndex(),
biases.getIpuIndex(),
mean.getIpuIndex(),
var.getIpuIndex()
],
num_outputs=num_outputs,
epsilon=epsilon,
momentum=momentum,
debugContext=debugContext)
results = results[0] if num_outputs == 1 else results
if isinstance(results, list):
results = [TTensor(r) for r in results]
else:
results = [TTensor(results)]
return results
def _concat(tensor_list, dim, debugContext=""):
if check_all_constant(tensor_list):
# degrade to np op
np_arr_list = [t.data for t in tensor_list]
result = np.concatenate(np_arr_list, axis=dim)
return constant(result)
return TTensor(get_builder().aiOnnx.concat(
[tensor.getIpuIndex() for tensor in tensor_list],
dim,
debugContext=debugContext))
def sqrt(x, debugContext=""):
result = get_builder().aiOnnx.sqrt([x.getIpuIndex()],
debugContext=debugContext)
return TTensor(result)
def sigmoid(tensor, debugContext=""):
return TTensor(get_builder().aiOnnx.sigmoid([tensor.getIpuIndex()],
debugContext=debugContext))
def transpose(x, dim_order, debugContext=""):
"""dim_order: list of int. eg:[0,2,3,1]"""
if check_all_constant([x]):
# degrade to np op
result = np.transpose(x.data, dim_order)
return constant(result)
return TTensor(get_builder().aiOnnx.transpose([x.getIpuIndex()],
dim_order,
debugContext=debugContext))
def mean(x, debugContext=""):
return TTensor(get_builder().aiOnnx.mean(x.getIpuIndex(),
debugContext=debugContext))
def gc_slice(x, axes, starts, ends, debugContext=""):
if check_all_constant([x, axes, starts, ends]):
# degrade to np op
x = x.data
x_slices = []
for start, end in zip(starts.data.tolist(), ends.data.tolist()):
x_slices.append(slice(start, end))
        return constant(x[tuple(x_slices)])
else:
x = get_builder().aiOnnx.slice([
x.getIpuIndex(),
starts.getIpuIndex(),
ends.getIpuIndex(),
axes.getIpuIndex()
],
debugContext=debugContext)
return TTensor(x)
def topk(x, k, sorted=True, dim=-1, debugContext=""):
"""
args:
k: the count of return
dim: in which dim to sort and clip
"""
if k.shape.ndims == 0:
k = k.unsqueeze(0)
else:
assert k.shape.ndims == 1
values, order = get_builder().aiOnnx.topk(
[x.getIpuIndex(), k.getIpuIndex()],
axis=dim,
sorted=sorted,
debugContext=debugContext)
return TTensor(values), TTensor(order)
def constant(x, debugContext=''):
if np.isscalar(x):
return ConstantScalar(None, x)
assert isinstance(x, np.ndarray)
return ConstantTensor(None, x)
def shapeConstant(x, debugContext=''):
if np.isscalar(x):
x = np.array(x)
assert isinstance(x, np.ndarray)
return ShapeTensor(None, x)
def gather(x, indices, dim=0, debugContext=""):
x = get_builder().aiOnnx.gather(
[x.getIpuIndex(), indices.getIpuIndex()],
axis=dim,
debugContext=debugContext)
return TTensor(x)
def addInitializedInputTensor(array, debugContext=""):
"""
args:
array: an numpy array that will be copy to IPU
return:
str: tensor name
"""
name = get_builder().addInitializedInputTensor(array,
debugContext=debugContext)
return TTensor(name)
def unsqueeze(x, dims, debugContext=""):
"""
args:
dim: list of int of which dim will delete
eg:[3] or [1,3]
"""
if check_all_constant([x]):
# degrade to np op
result = np.expand_dims(x.data, axis=dims)
return constant(result)
x = get_builder().aiOnnx.unsqueeze([x.getIpuIndex()],
axes=dims,
debugContext=debugContext)
return TTensor(x)
def ceil(x, debugContext=""):
result = get_builder().aiOnnx.ceil([x.getIpuIndex()],
debugContext=debugContext)
return TTensor(result)
def squeeze(x, dims, debugContext=""):
if check_all_constant([x]):
# degrade to np op
x = x.data
current_dim = float('inf')
for dim in reversed(dims):
assert current_dim > dim
current_dim = dim
x = x.squeeze(dim)
return constant(x)
if isinstance(dims, int):
dims = [dims]
for dim in dims:
assert x.pureShape[dim] == 1
x = get_builder().aiOnnx.squeeze([x.getIpuIndex()],
axes=dims,
debugContext=debugContext)
return TTensor(x)
def exp(x, debugContext=""):
return TTensor(get_builder().aiOnnx.exp([x.getIpuIndex()],
debugContext=debugContext))
def printTensor(t):
get_builder().aiGraphcore.printtensor([t.getIpuIndex()], print_gradient=0)
def mul(tensors, debugContext=""):
if check_all_constant(tensors):
# degrade to np op
result = 1
for t in tensors:
result = t.data * result
return constant(result, debugContext=debugContext)
return TTensor(get_builder().aiOnnx.mul([t.getIpuIndex() for t in tensors],
debugContext=debugContext))
def add(tensors, debugContext=""):
if check_all_constant(tensors):
# degrade to np op
result = 0
for t in tensors:
result = result + t.data
return constant(result, debugContext=debugContext)
return TTensor(get_builder().aiOnnx.add([t.getIpuIndex() for t in tensors],
debugContext=debugContext))
def div(tensors, debugContext=""):
assert len(tensors) == 2
if check_all_constant(tensors):
# degrade to np op
result = tensors[0].data / tensors[1].data
return constant(result, debugContext=debugContext)
return TTensor(get_builder().aiOnnx.div([t.getIpuIndex() for t in tensors],
debugContext=debugContext))
def sub(tensors, debugContext=""):
assert len(tensors) == 2
if check_all_constant(tensors):
# degrade to np op
result = tensors[0].data - tensors[1].data
return constant(result, debugContext=debugContext)
return TTensor(get_builder().aiOnnx.sub([t.getIpuIndex() for t in tensors],
debugContext=debugContext))
def max(tensor_list, debugContext=""):
if check_all_constant(tensor_list):
# degrade to np op
arr_list = [t.data for t in tensor_list]
result = np.max(arr_list)
return constant(result)
return TTensor(get_builder().aiOnnx.max(
[t.getIpuIndex() for t in tensor_list], debugContext=debugContext))
def min(tensor_list, debugContext=""):
if check_all_constant(tensor_list):
# degrade to np op
arr_list = [t.data for t in tensor_list]
result = np.min(arr_list)
return constant(result)
return TTensor(get_builder().aiOnnx.min(
[t.getIpuIndex() for t in tensor_list], debugContext=debugContext))
def split(x, lenOfSplit, dim, debugContext=""):
    """
    args:
        lenOfSplit: e.g. (4, 1) splits into two pieces, one of length 4
            and the other of length 1
    """
    outputs = get_builder().aiOnnx.split([x.getIpuIndex()],
                                         len(lenOfSplit),
                                         dim,
                                         lenOfSplit,
                                         debugContext=debugContext)
    # aiOnnx.split returns one output tensor per piece
    return [TTensor(o) for o in outputs]
def clip(x, minmun=-np.inf, maxmun=np.inf, debugContext=""):
if get_ai_onnx_version() >= 11:
minmun = constant(np.asarray(minmun).astype(np.float32))
maxmun = constant(np.asarray(maxmun).astype(np.float32))
return TTensor(get_builder().aiOnnx.clip(
[x.getIpuIndex(),
minmun.getIpuIndex(),
maxmun.getIpuIndex()],
debugContext=debugContext))
    else:
        # before opset 11, clip takes min/max as float attributes rather than tensors
        return TTensor(get_builder().aiOnnx.clip([x.getIpuIndex()],
                                                 float(maxmun),
                                                 float(minmun),
                                                 debugContext=debugContext))
def reduceprod(x, dim, keepdims=False, debugContext=""):
"""
args:
dim: int .which dim to do prod
"""
x = get_builder().aiOnnx.reduceprod([x.getIpuIndex()],
axes=[dim],
keepdims=keepdims,
debugContext=debugContext)
return TTensor(x)
def cast(x, target_type='FLOAT', debugContext=''):
"""
target_type:
FLOAT|FLOAT16|INT8|INT16|INT32|UINT8|UINT16|UINT32|BOOL
"""
target_type = 'FLOAT' if target_type == 'FLOAT32' else target_type
if check_all_constant([x]):
# degrade to np op
data = x.data.astype(mappin_gc2npy[target_type])
return constant(data)
else:
return TTensor(get_builder().aiOnnx.cast([x.getIpuIndex()],
target_type.upper(),
debugContext))
def log(x, debugContext=''):
return TTensor(get_builder().aiOnnx.log([x.getIpuIndex()], debugContext))
def less(x, y, debugContext=''):
x, y = align_tensor([x, y])
return TTensor(get_builder().aiOnnx.less(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def abs(x, debugContext=''):
return TTensor(get_builder().aiOnnx.abs([x.getIpuIndex()], debugContext))
def argmax(x, axis=0, keepdims=0, debugContext=''):
return TTensor(get_builder().aiOnnx.argmax([x.getIpuIndex()], axis,
keepdims, debugContext))
def reducemax(x, axes=0, keepdims=0, debugContext=''):
if isinstance(axes, int):
axes = [axes]
assert isinstance(axes, list) or axes is None
return TTensor(get_builder().aiOnnx.reducemax([x.getIpuIndex()], axes,
keepdims, debugContext))
def reducemin(x, axes=0, keepdims=0, debugContext=''):
if isinstance(axes, int):
axes = [axes]
assert isinstance(axes, list) or axes is None
return TTensor(get_builder().aiOnnx.reducemin([x.getIpuIndex()], axes,
keepdims, debugContext))
def reduceMean(x, axes, keepdims=False, debugContext=''):
if isinstance(axes, int):
axes = [axes]
assert isinstance(axes, list) or axes is None
return TTensor(get_builder().aiOnnx.reducemean([x.getIpuIndex()],
axes=axes,
keepdims=keepdims,
debugContext=debugContext))
def greater(x, y, debugContext=''):
x, y = align_tensor([x, y])
return TTensor(get_builder().aiOnnx.greater(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def equal(x, y, debugContext=''):
x, y = align_tensor([x, y])
return TTensor(get_builder().aiOnnx.equal(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def logical_and(x, y, debugContext=''):
return TTensor(get_builder().aiOnnx.logical_and(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def logical_not(x, debugContext=''):
return TTensor(get_builder().aiOnnx.logical_not([x.getIpuIndex()],
debugContext))
def logical_or(x, y, debugContext=''):
return TTensor(get_builder().aiOnnx.logical_or(
[x.getIpuIndex(), y.getIpuIndex()], debugContext))
def reduceSum(x, axes=None, keepdims=0, debugContext=''):
if isinstance(axes, int):
axes = [axes]
assert isinstance(axes, list) or axes is None
return TTensor(get_builder().aiOnnx.reducesum([x.getIpuIndex()], axes,
keepdims, debugContext))
def cumsum(x, axes, exclusive=0, reverse=0, debugContext=''):
if x.dtype == 'FLOAT16':
raise NotImplementedError('not support fp16')
return TTensor(get_builder().aiOnnx.cumsum(
[x.getIpuIndex(), axes.getIpuIndex()], exclusive, reverse,
debugContext))
def expand(x, shape, debugContext=''):
return TTensor(get_builder().aiOnnx.expand(
[x.getIpuIndex(), shape.getIpuIndex()], debugContext=debugContext))
def randomuniformlike(x, high=6.0, low=-6.0):
result = get_builder().aiOnnx.randomuniformlike([x.getIpuIndex()],
high=high,
low=low)
return TTensor(result)
def flatten(x):
    '''Flatten the tensor to 1-D (analogous to numpy's ndarray.flatten).
    '''
if check_all_constant([x]):
x = x.data.flatten()
return constant(x)
x = get_builder().aiOnnx.flatten([x.getIpuIndex()], 0)
return TTensor(x).squeeze(0)
def oneslike(t, dtype=None, debugContext=''):
dtype = t.dtype if dtype is None else dtype
dshape = t.pureShape
assert 0 not in dshape
return ones(dshape, dtype, debugContext=debugContext)
def ones(shape, dtype='FLOAT', debugContext=''):
return constant(np.ones(shape, dtype=mappin_gc2npy[dtype]),
debugContext=debugContext)
def zeroslike(t, dtype=None, debugContext=''):
dtype = t.dtype if dtype is None else dtype
dshape = t.pureShape
assert 0 not in dshape
return zeros(dshape, dtype, debugContext=debugContext)
def zeros(shape, dtype='FLOAT', debugContext=''):
return constant(np.zeros(shape, dtype=mappin_gc2npy[dtype]),
debugContext=debugContext)
def where(condition, x, y, debugContext=''):
return TTensor(get_builder().aiOnnx.where(
[condition.getIpuIndex(),
x.getIpuIndex(),
y.getIpuIndex()]))
def detach(x):
return TTensor(get_builder().aiGraphcore.detach([x.getIpuIndex()]))
def one_hot(indices, depth, values=None, debugContext=''):
'''
values: [off_value, on_value]
    if an index is -1, the corresponding row is [0] * depth (all off_value)
'''
if isinstance(depth, int):
depth = to_tensor(depth, dtype='INT64')
if values is None:
values = constant(np.asarray([0, 1]).astype(np.int32),
debugContext=debugContext)
assert indices.dtype in [
'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
]
assert depth.dtype in [
'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
]
assert values.dtype in [
'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
]
result = get_builder().aiOnnx.onehot(
[indices.getIpuIndex(),
depth.getIpuIndex(),
values.getIpuIndex()],
debugContext=debugContext)
result = TTensor(result)
result_shape = list(result.pureShape)
if result_shape[1] == 0:
result_shape[1] = depth
result = result.reshape(result_shape)
return result
def tile(input, repeats, debugContext=""):
if check_all_constant([input, repeats]):
result = np.tile(input.data, repeats.data)
return constant(result)
result = get_builder().aiOnnx.tile(
[input.getIpuIndex(), repeats.getIpuIndex()], debugContext)
return TTensor(result)
def checkTensorsTypeSame(tensors_list):
    types = [t.dtype for t in tensors_list]
    if len(set(types)) == 2:
        # dtype strings are upper case, e.g. 'INT32'
        assert 'INT32' in types and 'INT64' in types, 'only INT32 and INT64 are treated as the same type'
    else:
        assert len(set(types)) == 1, 'types should be same'
def np64to32_or_16(np_arr):
    # temporary workaround while the IPU does not support 64-bit data types
local_mappin = {
'int64': np.int32,
'uint64': np.uint32,
'float64': np.float16
}
if np_arr.dtype.name in list(local_mappin.keys()):
np_arr = np_arr.astype(local_mappin[np_arr.dtype.name])
return np_arr
def to_tensor(x, dtype=None):
# return ConstantTensor if x type is int,float,ndarray,list, or ConstantTensor
# return TTensor if x type is TTensor
if np.isscalar(x) or isinstance(x, list):
# if type(x) in [int,float,list]:
x = np.array(
x
) # python basic type int and float should be converted to np.float32 and np.int32
x = np64to32_or_16(x)
if dtype is not None:
x = x.astype(mappin_gc2npy[dtype])
x = constant(x)
elif type(x) in [np.ndarray]:
if dtype is not None:
x = x.astype(mappin_gc2npy[dtype])
x = constant(x)
elif isinstance(x, TTensor):
pass
else:
raise TypeError('not a legal type')
return x
TYPE_SEQUENCE = {'BOOL': 0, "UINT": 1, "INT": 2, "FLOAT": 3}
def align_tensor(tensors):
tensors = [to_tensor(t) for t in tensors]
_types = [t.dtype.upper() for t in tensors]
bits_list = [mappin_type2bits[t.dtype] for t in tensors]
nakedTypes = [t.dtype.strip(string.digits).upper() for t in tensors]
type_idx = 0
dst_dtype = 'BOOL'
for dtype in nakedTypes:
if TYPE_SEQUENCE[dtype] > type_idx:
type_idx = TYPE_SEQUENCE[dtype]
dst_dtype = dtype
max_bits = builtins.max(bits_list)
if 'FLOAT32' in _types or 'FLOAT' in _types:
max_bits = 32
elif 'FLOAT16' in _types:
max_bits = 16
dst_dtype = dst_dtype + str(max_bits)
new_tensors = []
for t in tensors:
t = t.cast(dst_dtype)
new_tensors.append(t)
tensors = new_tensors
if TTensor in [type(t) for t in tensors]:
tensors = [TTensor(t.getIpuIndex()) for t in tensors]
return tensors
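# Worked example of the promotion rule above (constants only, so no builder call
# is issued):
#   align_tensor([2, 1.5])
#   2   -> INT32 constant (64-bit integers are narrowed by np64to32_or_16)
#   1.5 -> FLOAT16 constant (float64 is narrowed to float16 by np64to32_or_16)
#   result: both operands are cast to FLOAT16, since FLOAT outranks INT and 16
#   is the widest floating-point width present.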
def int32toint64(t):
    return t.cast('INT64') if t.dtype == 'INT32' else t
def sum_list(l):
    # Despite the name, this returns the product of the elements
    # (used by TTensor.numel to compute the element count from a shape).
    current = 1
    for ele in l:
        current *= ele
    return current
class TTensor():
def __init__(self, name, nodata=False):
assert isinstance(name, str)
TENSOR_NAMES.append(name)
self.__name = name
if safe_mode() and not nodata:
assert isinstance(self.pureShape, (list, tuple))
assert isinstance(self.dtype, str)
get_all_tensors_info().append(str(self))
def copy_from_tensor(self, tensor):
assert self.__class__.__name__ == tensor.__class__.__name__
self.__name = tensor.__name
def getIpuIndex(self, ):
return self.__name
@property
def name(self, ):
return self.getIpuIndex()
def __len__(self, ):
return self.pureShape[0]
def numel(self, ):
return sum_list(self.pureShape)
def unsqueeze(
self,
dims,
):
"""
args:
dim: list of int of which dim will delete
eg:[3] or [1,3]
"""
if isinstance(dims, int):
dims = [dims]
result = unsqueeze(self, dims=dims)
return result
def squeeze(
self,
dims,
):
"""
args:
dim: list of int of which dim will delete
eg:[3] or [1,3]
"""
if isinstance(dims, int):
dims = [dims]
result = squeeze(self, dims=dims)
return result
def cast(self, type_str):
result = cast(self, type_str.upper())
return result
def add(self, tensors):
checkTensorsTypeSame(tensors)
result = add(tensors)
return result
def __add__(self, other):
tensors = align_tensor([self, other])
return self.add(tensors)
def __radd__(self, other):
tensors = align_tensor([other, self])
return self.add(tensors)
def sub(self, tensors):
checkTensorsTypeSame(tensors)
result = sub(tensors)
return result
def __sub__(self, other):
tensors = align_tensor([self, other])
return self.sub(tensors)
def __neg__(self):
return self.__rsub__(0)
def __rsub__(self, other):
tensors = align_tensor([other, self])
return self.sub(tensors)
def mul(self, tensors):
checkTensorsTypeSame(tensors)
result = mul(tensors)
return result
def __mul__(self, other):
tensors = align_tensor([self, other])
return self.mul(tensors)
def __rmul__(self, other):
tensors = align_tensor([other, self])
return self.mul(tensors)
def truediv(self, tensors):
checkTensorsTypeSame(tensors)
result = div(tensors)
return result
def __truediv__(self, other):
tensors = align_tensor([self, other])
return self.truediv(tensors)
def __rtruediv__(self, other):
tensors = align_tensor([other, self])
return self.truediv(tensors)
def __repr__(self, ):
string = self.__class__.__name__ + ': ' + self.__name + ', shape: ' + str(
self.pureShape) + ', dtype: ' + self.dtype
string += ', ID: ' + str(id(self))
return string
def __format__(self, code):
return self.__repr__()
@property
def pureShape(self):
return get_builder().getTensorShape(self.__name)
@property
def shape(self):
self_shape = self.pureShape
self_shape_np = np.array(self_shape, np.uint32)
input_shape_tensor = shapeConstant(self_shape_np)
return input_shape_tensor
@property
def dtype(self):
type_str = get_builder().getTensorDtypeString(self.__name).upper()
if type_str == 'FLOAT32':
return 'FLOAT'
return type_str
def get_shape(self):
return self.shape
def detach(self, ):
return detach(self)
def flatten(self, ):
return flatten(self)
def transpose(self, dim_order):
return transpose(self, dim_order)
def reshape(self, target_shape):
return reshape(self, target_shape)
def __getitem__(self, index):
if not isinstance(index, tuple):
index = (index, )
def get_start(start):
return 0 if start is None else start
def get_stop(stop, dim):
return self.pureShape[dim] if stop is None else stop
axes = []
starts = []
ends = []
squeezes = []
for dim, slice_obj in enumerate(index):
if getattr(slice_obj, 'step', None) is not None:
raise NotImplementedError(
"Tensor indexing don't support interval currently")
if isinstance(slice_obj, int):
start = slice_obj
end = slice_obj + 1
squeezes.append(dim)
elif isinstance(slice_obj, ConstantTensor):
start = int(slice_obj.data)
end = start + 1
else:
start = get_start(slice_obj.start)
start = int(getattr(start, 'data', start))
end = get_stop(slice_obj.stop, dim)
end = int(getattr(end, 'data', end))
axes.append(dim)
starts.append(start)
ends.append(end)
axes = constant(np.array(axes))
starts = constant(np.array(starts))
ends = constant(np.array(ends))
# do slice in onnx
output = gc_slice(self, axes, starts, ends)
if len(squeezes) > 0:
output = squeeze(output, dims=squeezes)
return output
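# Indexing example for TTensor.__getitem__ above (illustrative; assume `x` is a
# TTensor of shape (4, 8, 16)):
#   y = x[0, :, 1:3]
# emits an aiOnnx.slice with axes [0, 1, 2], starts [0, 0, 1], ends [1, 8, 3],
# then squeezes axis 0, so `y` has shape (8, 2). Step/stride indexing raises
# NotImplementedError.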
class ConstantTensor(TTensor):
def __init__(self, name, data):
assert isinstance(data, np.ndarray)
if data.dtype.name in ['float64', 'float']:
data = data.astype(np.float32)
self.data = data
if name is None:
self.__name = 'uninitialized_constant:' + str(CONSTANT_COUNTER[0])
CONSTANT_COUNTER[0] = CONSTANT_COUNTER[0] + 1
self.initialized = False
else:
super().__init__(name)
self.__name = name
def copy_from_tensor(self, tensor):
assert self.__class__.__name__ == tensor.__class__.__name__
self.__name = tensor.__name
self.data = tensor.data
self.initialized = tensor.initialized
def real_init(self, ):
assert not self.initialized
self.data = np.ascontiguousarray(self.data.copy())
name = get_builder().aiOnnx.constant(self.data)
super().__init__(name)
self.__name = name # private attribute can not be inherited
self.initialized = True
def getIpuIndex(self, ):
if self.initialized is False:
self.real_init()
name = super().getIpuIndex()
assert name is not None
return name
def __repr__(self, ):
string = self.__class__.__name__ + ': ' + self.__name + ', shape: ' + str(
self.pureShape) + ', dtype: ' + self.dtype
string = string + ', constant'
string += ', ID: ' + str(id(self))
return string
@property
def pureShape(self):
return self.data.shape
@property
def dtype(self):
result = self.data.dtype.name.upper()
if result == 'FLOAT32':
result = 'FLOAT'
return result
def as_list(self, ):
return self.data.tolist()
def __getitem__(self, index):
if isinstance(index, int):
return constant(self.data[index])
if not isinstance(index, tuple):
index_tuple = (index, )
else:
index_tuple = index
index_list = []
for index in index_tuple:
if isinstance(index, int):
index = index
index_list.append(index)
continue
else:
index = [index.start, index.stop, index.step]
slices = []
for ele in index:
if isinstance(ele, list):
ele = [scalarTensor2int(ele_ele) for ele_ele in ele]
elif ele is None:
pass
else:
ele = scalarTensor2int(ele)
slices.append(ele)
index_list.append(slice(*slices))
return constant(self.data[tuple(index_list)])
class ShapeTensor(ConstantTensor):
def __init__(self, name, data):
assert len(data.shape) == 1 # ShapeTensor rank is 1
super().__init__(name, data)
self.__name = name
@property
def ndims(self):
return self.numel()
def eliminate_constantTensor_from_list(num_list, to_type):
results = []
for num in num_list:
if isinstance(num, ConstantTensor):
num = to_type(num.data)
results.append(num)
return results
def scalar2num(t):
    return t.data if isinstance(t, ConstantScalar) else t
class ConstantScalar(ConstantTensor):
def __init__(self, name, data):
assert np.isscalar(data) or data.size == 1 or isinstance(
data, (int, float))
data = np.array(np.asscalar(np.asarray(data).flatten()))
super().__init__(name, data)
self.__name = name
def __eq__(self, other):
return scalar2num(self) == scalar2num(other)
def __ge__(self, other):
return scalar2num(self) >= scalar2num(other)
def __le__(self, other):
return scalar2num(self) <= scalar2num(other)
def __lt__(self, other):
return scalar2num(self) < scalar2num(other)
def __gt__(self, other):
return scalar2num(self) > scalar2num(other)
    def __ne__(self, other):
        return scalar2num(self) != scalar2num(other)
def nllloss(prob,
label,
reductionType=popart.ReductionType.Mean,
debugPrefix=''):
# prob: scaled probabilities, [batch, classes], float
# label: labels, [batch,], int32
with name_scope(debugPrefix):
loss = get_builder().aiGraphcore.nllloss(
[prob.getIpuIndex(), label.getIpuIndex()],
reductionType,
debugContext=debugPrefix)
return TTensor(loss)
def get_all_params():
all_params = [
TTensor(name) for name in get_builder().getInputTensorIds()
if get_builder().isInitializer(name)
]
return all_params
mappin_type2bits = {
'FLOAT': 32,
'FLOAT16': 16,
"UINT8": 8,
"UINT16": 16,
"UINT32": 32,
"UINT64": 64,
"INT8": 8,
"INT16": 16,
"INT32": 32,
"INT64": 64,
"BOOL": 8
}
mappin_gc2npy = {
'float': np.float32,
'float32': np.float32,
'float16': np.float16,
'int8': np.int8,
'int16': np.int16,
'int32': np.int32,
'int64': np.int64,
'uint8': np.uint8,
'uint16': np.uint16,
'uint32': np.uint32,
    'bool': np.bool_,
'FLOAT': np.float32,
'FLOAT32': np.float32,
'FLOAT16': np.float16,
'INT8': np.int8,
'INT16': np.int16,
'INT32': np.int32,
'INT64': np.int64,
'UINT8': np.uint8,
'UINT16': np.uint16,
'UINT32': np.uint32,
    'BOOL': np.bool_,
}
# optimizer
class SGD:
def __init__(self,
learning_rate=0.001,
momentum=0.0005,
weight_decay=0.0001,
use_locking=False,
name='Momentum',
use_nesterov=False,
clip_norm=None,
lossScaling=1.0,
specific_dic={}):
assert use_locking is False
assert use_nesterov is False
self.learning_rate = learning_rate
self.name = name
self.clip_norm = clip_norm
self.lossScaling = lossScaling
self.opti_cfg = {
"defaultLearningRate": (self.learning_rate, False),
"defaultMomentum": (momentum, True),
"defaultWeightDecay": (weight_decay, True),
}
if self.lossScaling != 1.0:
self.opti_cfg['lossScaling'] = (self.lossScaling, True)
if clip_norm is not None:
print('clip norm gradients:', clip_norm)
self.gc_optimizer = popart.SGD(
self.opti_cfg,
clip_norm_settings=[
popart.ClipNormSettings.clipAllWeights(clip_norm)
])
else:
self.gc_optimizer = popart.SGD(self.opti_cfg)
for name in specific_dic:
self.gc_optimizer.insertSpecific(name, specific_dic[name])
def adj_lr(self, lr, sess, specific_dic={}):
self.opti_cfg['defaultLearningRate'] = (lr, False)
new_optimizer = popart.SGD(self.opti_cfg)
for name in specific_dic:
new_optimizer.insertSpecific(name, specific_dic[name])
sess.updateOptimizerFromHost(new_optimizer)
self.gc_optimizer = new_optimizer
def deduce_half(input, fp16_on):
# Derive whether to use half-precision according to the input and the given parameters: fp16_on
assert fp16_on in [False, True, None]
# False: fp32
# True: fp16
# None: depend on input
# return :
# cast flag: False(no cast), fp16/fp32(cast output to fp16/fp32)
# input: casted input
# fp16_on: True(cast weights to fp16), False(cast weights to fp32)
cast_flag = False
if input.dtype.lower() == 'float16':
if fp16_on is None:
fp16_on = True
elif fp16_on is False:
input = input.cast('FLOAT32')
cast_flag = 'FLOAT16'
# pass
elif input.dtype.lower() in ['float32', 'float']:
if fp16_on:
input = input.cast('FLOAT16')
cast_flag = 'FLOAT32'
# pass
else:
raise RuntimeError('input wrong type: {}'.format(input.dtype))
return cast_flag, input, fp16_on
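# Illustrative use of deduce_half (sketch; `x` is assumed to be a TTensor and
# `build_layer` is a hypothetical function that creates weights in the chosen
# precision):
#   cast_flag, x, fp16_on = deduce_half(x, fp16_on=None)
#   out = build_layer(x, use_fp16=fp16_on)
#   if cast_flag:
#       out = out.cast(cast_flag)  # restore the caller's original precision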
|
import json
import functools
from graphviz import Digraph
def main():
"""
Plots the relationships defined by the JSON files below using Graphviz.
"""
# Read the raw data
relationship_filename = "relationships_by_id.json"
name_map_filename = "ids_to_names.json"
with open(relationship_filename) as relationship_file:
relationships = json.load(relationship_file)
with open(name_map_filename) as name_map_file:
name_map = json.load(name_map_file)
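    # Expected file shapes (inferred from the code below):
    #   relationships_by_id.json: {"<user_id>": [<follower_id>, ...], ...}
    #   ids_to_names.json:        {"<user_id>": "<screen_name>", ...}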
# Create nodes out of Twitter users, and directed edges representing follows.
# Those users with very few followers (defined by importance_limit) are ignored.
importance_limit = 10
nodes = []
edges = []
for user, followers in relationships.items():
if user in name_map and len(followers) > importance_limit:
screen_name = name_map[user]
nodes.append(screen_name)
for follower in map(str, followers):
if follower in name_map and len(relationships[follower]) > importance_limit:
edges.append((screen_name, name_map[follower]))
# Draw the graph.
digraph = functools.partial(Digraph, format='svg')
add_edges(add_nodes(digraph(), nodes), edges).render('pics/g1')
def add_nodes(graph, nodes):
"""
Helper function from http://matthiaseisen.com/articles/graphviz/
"""
for n in nodes:
if isinstance(n, tuple):
graph.node(n[0], **n[1])
else:
graph.node(n)
return graph
def add_edges(graph, edges):
"""
Helper function from http://matthiaseisen.com/articles/graphviz/
"""
for e in edges:
if isinstance(e[0], tuple):
graph.edge(*e[0], **e[1])
else:
graph.edge(*e)
return graph
if __name__ == '__main__':
main()
|
import numpy as np
import os
from surfinpy import p_vs_t
from surfinpy import utils as ut
from surfinpy.data import DataSet
import unittest
from numpy.testing import assert_almost_equal
test_data = os.path.join(os.path.dirname(__file__), 'H2O.txt')
class Testp_vs_t(unittest.TestCase):
# Is this needed ?
# def setUp(self):
# self.testdata = open(test_data).read()
def test_calculate_surface_energy(self):
AE = np.array([-1.0, -2.0])
lnP = np.arange(1, 10)
T = np.arange(1, 10)
coverage = np.array([-10.0*10**18, -20.0*10**18])
SE = 1.0
nsurfaces = 2
x = p_vs_t.calculate_surface_energy(AE, lnP, T, coverage,
SE, nsurfaces)
assert_almost_equal(x[0], 1.)
def test_convert_adsorption_energy(self):
x = p_vs_t.convert_adsorption_energy_units(1)
expected = 96485
assert x == expected
def test_calculate_adsorption_energy(self):
x = p_vs_t.calculate_adsorption_energy(1, 2, 3, 4)
expected = -4.3333333
assert_almost_equal(expected, x, decimal=4)
def test_adsorption_energy(self):
stoich = DataSet(cation = 24, x = 48, y = 0, area = 60.22,
energy = -535.660075, label = "Stoich")
H2O = DataSet(cation = 24, x = 48, y = 2, area = 60.22,
energy = -621.877140, label = "Stoich")
H2O_2 = DataSet(cation = 24, x = 48, y = 4, area = 60.22,
energy = -670.229520, label = "Stoich")
data = [H2O, H2O_2]
x = p_vs_t.adsorption_energy(data, stoich, -10.0)
expected = [np.array([-3194476.7582625]),
np.array([-2281133.22520625])]
assert_almost_equal(expected, x, decimal=4)
def test_initialise(self):
x = ut.read_nist(test_data)
a, b, c, d = p_vs_t.inititalise(x, -10.0, 1000, -13, 5.5)
assert_almost_equal(d[0], -10.000, decimal=3)
assert_almost_equal(d[1], -10.000, decimal=3)
assert_almost_equal(d[-1], -10.103, decimal=3)
assert_almost_equal(d[-2], -10.103, decimal=3)
def test_calculate(self):
stoich = DataSet(cation = 24, x = 48, y = 0, area = 60.22,
energy = -530.0, label = "Stoich")
H2O = DataSet(cation = 24, x = 48, y = 2, area = 60.22,
energy = -620.0, label = "Stoich")
H2O_2 = DataSet(cation = 24, x = 48, y = 4, area = 60.22,
energy = -677.0, label = "Stoich")
data = [H2O, H2O_2]
SE = 1.0
adsorbant = -10.0
thermochem = ut.read_nist(test_data)
system = p_vs_t.calculate(stoich, data, SE, adsorbant, thermochem)
expectedx = np.arange(2, 1000)
expectedy = np.arange(-13, 5.5, 0.1)
expectedz = np.zeros(((expectedy.size), (expectedx.size)))
assert_almost_equal(system.x, expectedx)
assert_almost_equal(system.y, expectedy)
assert_almost_equal(system.z, expectedz)
|
from minio import Minio
from minio.error import ResponseError
import json
import os
# Request
# {
# "inbox": "aellis_catjump_inbox",
# "outbox": "aellis_catjump_outbox"
# }
def handle(st):
req = json.loads(st)
mc = Minio(os.environ['minio_authority'],
access_key=os.environ['minio_access_key'],
secret_key=os.environ['minio_secret_key'],
secure=False)
try:
res = mc.make_bucket(req["inbox"] , location='us-east-1')
except ResponseError as err:
print(err)
try:
res = mc.make_bucket(req["outbox"] , location='us-east-1')
except ResponseError as err:
print(err)
# Response
# Empty - success
# Non-empty - failure
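# Example invocation (illustrative; the minio_authority, minio_access_key and
# minio_secret_key environment variables must point at a reachable MinIO server):
#   handle('{"inbox": "aellis_catjump_inbox", "outbox": "aellis_catjump_outbox"}')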
|
# -*- coding: utf-8 -*-
import numpy as np
from random import getrandbits
def check_equality(matrix1, matrix2, encoding = None):
'''
Checks to see if 2 matrices are equivalent. If an encoding is specified,
first converts elements in matrix1 using encoding, then checks for equality
with matrix2
Parameters:
matrix1 (array-like) - The first matrix to compare
matrix2 (array-like) - The second matrix to compare
encoding (dictionary, default = None) - A dictionary used to convert values in matrix1 into
their equivalents in matrix2. If a value in matrix1 is not in the encoding, it will
not be converted
'''
if encoding is None:
return __check_naive(matrix1,matrix2)
for row in range(len(matrix1)):
for col in range(len(matrix1[0])):
if (encoding[matrix1[row][col]] != matrix2[row][col]):
print('indices: ({},{})'.format(row,col))
print('Values: m1 - {}, m2 - {}'.format(encoding[matrix1[row][col]],matrix2[row][col]))
return False
return True
def __check_naive(matrix1, matrix2): # Helper function for check_equality()
for row in range(len(matrix1)):
for col in range(len(matrix1[0])):
if (matrix1[row][col] != matrix2[row][col]):
print('indices: ({},{})'.format(row,col))
print('Values: m1 - {}, m2 - {}'.format(matrix1[row][col],matrix2[row][col]))
return False
return True
VALUES = {0:'A',1:'G',2:'C',3:'T'}
BASES = ['A','G','T','C']
def generate_seq(length):
"""
Generates a sequence of given length using black magic
Sequence is a string of AGCT
"""
global VALUES
seq = []
    intseq = np.floor(np.random.rand(length) * 4).astype(int)
    for i in range(length):
        seq.append(VALUES[intseq[i]])
return ''.join(seq)
def mutate_seq(seq,mutation_rate = .01): # Given a sequence string, mutate it randomly according to a ratio
# Use boolean mask to determine random mutation locations
intseq = np.random.rand(len(seq)) < mutation_rate
newseq = []
global BASES
for index in range(len(seq)):
if intseq[index]:
newseq.append(np.random.choice(BASES))
else:
newseq.append(seq[index])
return ''.join(newseq)
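# Example usage (illustrative):
#   seq = generate_seq(100)
#   mutated = mutate_seq(seq, mutation_rate=0.05)
#   changed = sum(a != b for a, b in zip(seq, mutated))
#   print("{} of {} positions mutated".format(changed, len(seq)))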
|
# Send motor commands to the RPi over MQTT by publishing to the "motor_request" topic
import paho.mqtt.client as mqtt
import ast
import json
# Connect to the broker
broker_url, broker_port = "192.168.43.210", 1883
client = mqtt.Client()
client.connect(broker_url, broker_port)
# Issue commands in a loop
try:
client.loop_start()
while True:
        command = ast.literal_eval(input())  # parse the typed command as a Python literal (safer than eval)
# Publish a request to the motor topic
message = json.dumps(command).encode('utf-8')
client.publish(topic="motor_request", payload=message, qos=0, retain=False)
finally:
client.loop_stop()
|
########################################################################################
#
# Implement random permutations in the encoder for potentially beneficial
# regularization.
#
# Author(s): Nik Vaessen
########################################################################################
from typing import Optional
import torch as t
import torch.nn as nn
import numpy as np
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.wav2vec2 import configuration_wav2vec2
from src.networks.wav2vec2_components.base_components import (
Wav2vec2Encoder,
Wav2Vec2EncoderStableLayerNorm,
)
########################################################################################
# implementation of encoder with random permutation of time sequence
def local_shuffle(sequence: t.Tensor, mean: float, std: float):
assert len(sequence.shape) == 3
# assume shuffling across 1 dimension
num_tokens = sequence.shape[1]
# add a random normally distributed number to each index
indexes = t.arange(0, num_tokens)
noise = t.normal(mean, std, (num_tokens,))
noisy_indexes = indexes + noise
# sort the noisy indexes
_, sorted_indexes = t.sort(noisy_indexes)
# shuffle based on the sorted order of the noisy indexes
shuffled_sequence = sequence[:, sorted_indexes, :]
return shuffled_sequence
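# Quick illustration of local_shuffle (illustrative; runs on CPU): with a small
# std the added noise only swaps nearby indices, so coarse temporal order is kept.
#   x = t.arange(10, dtype=t.float32).view(1, 10, 1)
#   print(local_shuffle(x, mean=0.0, std=1.0).flatten())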
class Wav2vec2RandomPermutationEncoder(nn.Module):
shuffle_location_options = ["before", "after", "after+in_encoder", None]
def __init__(
self,
cfg: configuration_wav2vec2.Wav2Vec2Config,
enable_gradient_checkpointing: bool,
pretrained_weights: Optional[str] = None,
shuffling_location: Optional[str] = None,
shuffling_std: Optional[float] = None,
):
super().__init__()
base_encoder = Wav2vec2Encoder(
cfg,
enable_gradient_checkpointing=enable_gradient_checkpointing,
pretrained_weights=pretrained_weights,
)
self.config = base_encoder.encoder.config
self.pos_conv_embed = base_encoder.encoder.pos_conv_embed
self.layer_norm = base_encoder.encoder.layer_norm
self.dropout = base_encoder.encoder.dropout
self.layers = base_encoder.encoder.layers
self.gradient_checkpointing = base_encoder.encoder.gradient_checkpointing
del base_encoder
if shuffling_location not in self.shuffle_location_options:
raise ValueError(
f"{shuffling_location} should be one of"
f" {self.shuffle_location_options}"
)
if shuffling_location and shuffling_std is None:
raise ValueError(
f"{shuffling_std=} should be defined when {shuffling_location=}"
)
self.shuffling_location = shuffling_location
self.shuffling_std = shuffling_std
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
hidden_states[~attention_mask] = 0.0
# extend attention_mask
attention_mask = (
1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
) * -10000.0
attention_mask = attention_mask.expand(
attention_mask.shape[0],
1,
attention_mask.shape[-1],
attention_mask.shape[-1],
)
if self.training and self.shuffling_location == "before":
hidden_states = local_shuffle(hidden_states, 0, self.shuffling_std)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
for idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
            # shuffle only while training
            if self.training and self.shuffling_location in (
                "after",
                "after+in_encoder",
            ):
if self.shuffling_location == "after":
if idx == 0:
hidden_states = local_shuffle(
hidden_states, 0, self.shuffling_std
)
else:
hidden_states = local_shuffle(hidden_states, 0, self.shuffling_std)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
skip_the_layer = (
True
if self.training and (dropout_probability < self.config.layerdrop)
else False
)
if not skip_the_layer or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = t.utils.checkpoint.checkpoint(
create_custom_forward(layer),
hidden_states,
attention_mask,
)
else:
layer_outputs = layer(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attentions]
if v is not None
)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class Wav2vec2RandomPermutationEncoderStableLayerNorm(nn.Module):
shuffle_input_sequence_options = ["before", "after", "after+in_encoder", None]
def __init__(
self,
cfg: configuration_wav2vec2.Wav2Vec2Config,
enable_gradient_checkpointing: bool,
pretrained_weights: Optional[str] = None,
shuffling_location: Optional[str] = None,
shuffling_std: Optional[float] = None,
):
super().__init__()
base_encoder = Wav2Vec2EncoderStableLayerNorm(
cfg,
enable_gradient_checkpointing=enable_gradient_checkpointing,
pretrained_weights=pretrained_weights,
)
self.config = base_encoder.encoder.config
self.pos_conv_embed = base_encoder.encoder.pos_conv_embed
self.layer_norm = base_encoder.encoder.layer_norm
self.dropout = base_encoder.encoder.dropout
self.layers = base_encoder.encoder.layers
self.gradient_checkpointing = base_encoder.encoder.gradient_checkpointing
del base_encoder
if shuffling_location not in self.shuffle_input_sequence_options:
raise ValueError(
f"{shuffling_location} should be one of"
f" {self.shuffle_input_sequence_options}"
)
if shuffling_location and shuffling_std is None:
raise ValueError(
f"{shuffling_std=} should be defined when {shuffling_location=}"
)
self.shuffling_location = shuffling_location
self.shuffling_std = shuffling_std
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens are not attended to
hidden_states[~attention_mask] = 0
# extend attention_mask
attention_mask = (
1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
) * -10000.0
attention_mask = attention_mask.expand(
attention_mask.shape[0],
1,
attention_mask.shape[-1],
attention_mask.shape[-1],
)
if self.training and self.shuffling_location == "before":
hidden_states = local_shuffle(hidden_states, 0, self.shuffling_std)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
for idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
            # shuffle only while training
            if self.training and self.shuffling_location in (
                "after",
                "after+in_encoder",
            ):
if self.shuffling_location == "after":
if idx == 0:
hidden_states = local_shuffle(
hidden_states, 0, self.shuffling_std
)
else:
hidden_states = local_shuffle(hidden_states, 0, self.shuffling_std)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
skip_the_layer = (
True
if self.training and (dropout_probability < self.config.layerdrop)
else False
)
if not skip_the_layer or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
# XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
                    layer_outputs = t.utils.checkpoint.checkpoint(
create_custom_forward(layer),
hidden_states,
attention_mask,
)
else:
layer_outputs = layer(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attentions]
if v is not None
)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
|
"""Little script for exporting BigGAN zs/ys used in data collection."""
import argparse
import pathlib
import shutil
from src.utils import env
from tqdm.auto import tqdm
parser = argparse.ArgumentParser(description='export biggan zs')
parser.add_argument('--data-dir',
type=pathlib.Path,
help='root data dir (default: project data dir)')
parser.add_argument('--results-dir',
type=pathlib.Path,
help='results dir (default: project results dir)')
parser.add_argument('--datasets',
nargs='+',
default=('imagenet', 'places365'),
help='versions of biggan to export zs for (default: all)')
args = parser.parse_args()
data_dir = args.data_dir or env.data_dir()
results_dir = args.results_dir or (env.results_dir() / 'export-biggan-zs')
results_dir.mkdir(exist_ok=True, parents=True)
for dataset in tqdm(args.datasets):
zs_dir = data_dir / f'biggan-zs-{dataset}'
if not zs_dir.is_dir():
raise FileNotFoundError(f'zs dataset not found: {zs_dir}')
shutil.make_archive(str(results_dir / zs_dir.name),
'zip',
root_dir=data_dir,
base_dir=zs_dir.relative_to(data_dir))
|
import log_config_util
from arxivcitationnetwork import ArxivCitationNetwork, format_name_to_id
from collections import defaultdict
log = log_config_util.get_logger("merger")
N = 5 # Number of status updates per loop
def merge_bierbaum_network(citation_network: ArxivCitationNetwork, metadata_aid_to_doi_title):
"""
"Merge" Bierbaum Network by parsing it into a new object structure and cleaning it.
In this code the network created by the work of Clement et al. is referenced
as "bierbaum's network" simply because it was the root name of the repository.
"""
bierbaum_network = log_config_util.load_metadata("merged_internal-citations.json")
# Stats for loop counter
entries, counter_mark = log_config_util.get_loop_stats(bierbaum_network, N)
log.info("Start Parsing Bierbaum's network. Entries: {}".format(entries))
for counter, content in enumerate(bierbaum_network.items()):
log_config_util.update_counter(counter, counter_mark, entries, N, log)
aid = content[0]
arxiv_citations = content[1]
# Stripping version number for now
aid_without_version = log_config_util.remove_version_ending(aid)
arxiv_citations_without_version = list(map(log_config_util.remove_version_ending, arxiv_citations))
# Get metadata
metadata = metadata_aid_to_doi_title.get(aid_without_version, [])
if metadata:
doi = metadata[0]
title = metadata[1]
else:
doi = title = ""
citation_network.add_arxiv_paper(aid_without_version, arxiv_citations_without_version, [], doi, title)
# Clean data
log.info("Clean ArXiv IDs")
valid_arxiv_ids = set(metadata_aid_to_doi_title.keys())
faulty_ids = citation_network.clean_arxiv_ids(valid_arxiv_ids)
log.info("Cleaned {} bad Arxiv IDs".format(len(faulty_ids)))
log.info("Finished Parsing Bierbaum's network")
def transform_arxiv_metadata(metadata_aid_to_doi_title):
"""
    Transform the arxiv metadata into lookup dicts (title -> arxiv_id, doi -> arxiv_id) for faster access.
    Titles are already formatted via format_name_to_id.
"""
# Get data structures for fast comparison
doi_to_arxiv_id = defaultdict(list)
title_to_arxiv_id = defaultdict(list)
for arxiv_id, content in metadata_aid_to_doi_title.items():
# Add doi if exists
if content[0]:
doi_mag = content[0].upper() # make doi values upper case per definition of MAG
doi_to_arxiv_id[doi_mag] = arxiv_id # Ignoring possible duplicates here, match to "last" arxiv_id
# Add title
title = content[1]
title_to_arxiv_id[format_name_to_id(title)] = arxiv_id # Ignoring possible duplicates here
return title_to_arxiv_id, doi_to_arxiv_id
def add_arxiv_id_to_mag_data(magid_to_data, metadata_aid_to_doi_title):
""" Try to match arxiv metadata title to the titles of a MAG ID. For each match add the corresponding arxiv ID
Afterwards all mag_ids are matched to an arxiv paper if they correspond to an arxiv paper
Thus, all papers that are not matched are most likely not an arxiv paper and thus external
"""
title_to_arxiv_id, doi_to_arxiv_id = transform_arxiv_metadata(metadata_aid_to_doi_title)
valid_titles = set(title_to_arxiv_id.keys())
valid_dois = set(doi_to_arxiv_id.keys())
# Go through all entries and add arxiv ID if possible (roughly adds 200k IDs)
entries, counter_mark = log_config_util.get_loop_stats(magid_to_data, N)
log.info("Start Preprocessing MAG Data. Entries: {}".format(entries))
for counter, content in enumerate(magid_to_data.items()):
log_config_util.update_counter(counter, counter_mark, entries, N, log)
data = content[1]
# Check arxiv_id
arxiv_id = data[3]
if arxiv_id:
continue
# Check doi
doi = data[0]
if doi in valid_dois:
data[3] = doi_to_arxiv_id[doi]
continue
# Check title (using simple title matching)
paper_title_for_matching = ''.join(data[1].split()).lower()
original_title_for_matching = ''.join(data[2].split()).lower()
if original_title_for_matching in valid_titles:
data[3] = title_to_arxiv_id[original_title_for_matching]
continue
if paper_title_for_matching in valid_titles:
data[3] = title_to_arxiv_id[paper_title_for_matching]
log.info("Finished Preprocessing MAG Data")
def get_mag_external_papers_stats(magid_to_data):
"""Get stats about external papers """
external_papers = {mag_id: data for mag_id, data in magid_to_data.items() if not data[3]}
# Get stats about title duplicates
external_papers_title = set([data[2] for mag_id, data in external_papers.items()])
log.info("Title Duplicates in external non-arxiv Papers: {} ({:.2%})".format(
len(external_papers) - len(external_papers_title), 1 - len(external_papers_title) / len(external_papers)))
def add_external_papers_mag(citation_network: ArxivCitationNetwork, magid_to_data):
"""Add external papers to citation network based on MAG data"""
entries, counter_mark = log_config_util.get_loop_stats(magid_to_data, N)
log.info("Start adding external MAG papers. Entries: {}".format(entries))
for counter, content in enumerate(magid_to_data.items()):
log_config_util.update_counter(counter, counter_mark, entries, N, log)
mag_id = content[0]
data = content[1]
# If it is an external paper (e.g. has no referenced arxiv ID) add it
if not data[3]:
title = data[2] # Using original title not paper title of MAG
doi = data[0]
citation_network.add_external_paper(title, doi, mag_id)
log.info("Finished adding external MAG papers")
def add_arxiv_papers_mag(citation_network: ArxivCitationNetwork, magid_to_data, aid_to_ref_magids,
metadata_aid_to_doi_title):
"""
Add arxiv paper to citation network based on the mag data.
We need to add internal arxiv papers based on MAG since the data so far is based on the
successfully downloaded ones but the MAG results are based on the metadata.
"""
# Find arxiv IDs that need to be added
to_add_ids = set(aid_to_ref_magids.keys()) - set(citation_network.arxiv_papers.keys())
log.info("Need to add {} Arxiv IDs that are in the MAG data but not in the network".format(len(to_add_ids)))
entries, counter_mark = log_config_util.get_loop_stats(to_add_ids, 2)
log.info("Start adding arxiv papers from MAG Data. Entries: {}".format(entries))
for counter, arxiv_id in enumerate(to_add_ids):
log_config_util.update_counter(counter, counter_mark, entries, 2, log)
# Get metadata
metadata = metadata_aid_to_doi_title.get(arxiv_id, [])
if metadata:
doi = metadata[0]
title = metadata[1]
else:
doi = title = ""
# Here, we do not add any citations since the next step of the pipeline will do this
citation_network.add_arxiv_paper(arxiv_id, [], [], doi, title)
log.info("Finished adding arxiv papers from MAG Data")
def add_citations_mag(citation_network: ArxivCitationNetwork, magid_to_data, aid_to_ref_magids):
"""Extend citation connections of network based on MAG data"""
# Stats
pre_ac = citation_network.number_arxiv_citations()
pre_ec = citation_network.number_external_citations()
number_overall_references_mag = 0
for key, value in aid_to_ref_magids.items():
number_overall_references_mag += len(value)
entries, counter_mark = log_config_util.get_loop_stats(aid_to_ref_magids, N)
log.info("Start extending citations by MAG Data. Entries: {}".format(entries))
for counter, content in enumerate(aid_to_ref_magids.items()):
log_config_util.update_counter(counter, counter_mark, entries, N, log)
# Get content and object
arxiv_id = content[0]
ref_mag_ids = content[1]
arxiv_paper_object = citation_network.arxiv_papers[arxiv_id]
# Extend citations
for ref_mag_id in ref_mag_ids:
ref_data = magid_to_data[ref_mag_id]
ref_arxiv_id = ref_data[3]
ref_title = ref_data[2]
# Arxiv Reference
if ref_arxiv_id:
arxiv_paper_object.add_arxiv_citation(ref_arxiv_id)
continue
# External Reference
arxiv_paper_object.add_external_citation(ref_title)
# Remove duplicates that might have been created by direct approach above (this improves performance)
arxiv_paper_object.remove_duplicates_in_citations()
log.info("Finished extending citations by MAG Data")
# Stats
post_ac = citation_network.number_arxiv_citations()
new_ac = post_ac - pre_ac
post_ec = citation_network.number_external_citations()
new_ec = post_ec - pre_ec
not_new = number_overall_references_mag - (new_ec + new_ac)
prev_work = max(pre_ac - not_new, 0)
log.info(("Before adding MAG Data, the Citation Network had {} arxiv and {} external citations. \n" +
"Using the MAG Data, {} arxiv citations ({:.2%} of all MAG References)" +
" and {} external citations ({:.2%} of all MAG References) were added" +
" (Here one can read: citations = edges).\n" +
"Consequently, {} ({:.2%}) of the MAG reference were already in the network by previous work or a caused by title duplicates. \n" +
"Furthermore, the previous work was able to find {} citations not found by the MAG. \n" +
"In other words, {:.2%} of the references generated by previous work were not found in the MAG.").format(
pre_ac, pre_ec,
new_ac, new_ac / number_overall_references_mag,
new_ec, new_ec / number_overall_references_mag,
not_new, not_new / number_overall_references_mag,
prev_work, (
prev_work / pre_ac) if pre_ac else 0)) # Avoid division by zero, idea from: https://stackoverflow.com/a/27317595
def merge_mag_data(citation_network: ArxivCitationNetwork, metadata_aid_to_doi_title):
"""Merge MAG Data into citation network"""
# Load MAG Data
aid_to_ref_magids = log_config_util.load_metadata("aid_to_ref_magids.json")
magid_to_data = log_config_util.load_metadata("magid_to_data.json")
# Preprocess magid_to_data
add_arxiv_id_to_mag_data(magid_to_data, metadata_aid_to_doi_title) # roughly adds 200k Arxiv IDs
# Stats
get_mag_external_papers_stats(magid_to_data)
# Main Steps of Merging
log.info("Start Merge of MAG Data")
add_external_papers_mag(citation_network, magid_to_data)
add_arxiv_papers_mag(citation_network, magid_to_data, aid_to_ref_magids, metadata_aid_to_doi_title)
add_citations_mag(citation_network, magid_to_data, aid_to_ref_magids)
log.info("Finished Merge of MAG Data")
def preprocessing_parser_data(parsed_aids_not_matched, metadata_aid_to_doi_title):
"""
Preprocess parser data
- remove duplicates, split into external/internal citations and remove bad arxiv IDs
New format:
{arxiv_id: ([arxiv_ids,...], [titles, ...]),...} whereby titles are external and arxiv_ids internal papers
"""
# Get metadata data
title_to_arxiv_id, _ = transform_arxiv_metadata(metadata_aid_to_doi_title)
valid_titles = set(title_to_arxiv_id.keys())
valid_arxiv_ids = set(metadata_aid_to_doi_title.keys())
    # Preprocessing - get arxiv IDs without the version suffix and titles in the correct format
log.info("Preprocessing parsed data")
parser_data = {}
for arxiv_id, content in parsed_aids_not_matched.items():
# arxiv_id_no_version = log_config_util.remove_version_ending(arxiv_id)
arxiv_id_no_version = log_config_util.arxiv_filename_to_id(log_config_util.remove_version_ending(arxiv_id))
# Get list of referenced titles and eliminate possible duplicates
cited_titles = set([format_name_to_id(ref["title"]) for ref in content])
# Split for external/internal citations
arxiv_citations = []
external_citations = []
for cited_title in cited_titles:
if cited_title in valid_titles:
arxiv_citations.append(title_to_arxiv_id[cited_title])
else:
external_citations.append(cited_title)
# Duplicate arxiv IDs in the parsed set itself (i.e. version difference) will be eliminated by this
# However, the parsed set should not contain multiple entries for different versions anyhow.
parser_data[arxiv_id_no_version] = (arxiv_citations, external_citations)
# Clean bad arxiv IDs
invalid_entries = set(parser_data.keys()) - set(valid_arxiv_ids)
for key in invalid_entries:
parser_data.pop(key)
# Print stats about external/internal citations
pre_overall_ref = sum([len(content) for key, content in parsed_aids_not_matched.items()])
post_overall_ref = sum([len(content[0]) + len(content[1]) for key, content in parser_data.items()])
internal = sum([len(content[0]) for key, content in parser_data.items()])
external = sum([len(content[1]) for key, content in parser_data.items()])
log.info(("Overall references before preprocessing {}, afterwards {}. " +
"Extracted overall internal (arxiv) references: {}; Extracted overall external references {}.").format(
pre_overall_ref, post_overall_ref,
internal, external)
)
return parser_data
def merge_parser_data(citation_network: ArxivCitationNetwork, metadata_aid_to_doi_title, input_parsed_data):
"""
Add parser data to citation network
    As this data is generally less reliable than the previous sources, inject it cautiously.
"""
parser_data = preprocessing_parser_data(input_parsed_data, metadata_aid_to_doi_title)
log.info("Start mering parsed data")
# Add arxiv papers (Safely)
existing_papers = set(citation_network.arxiv_papers.keys())
for key, data in parser_data.items():
if key in existing_papers:
continue
citation_network.add_arxiv_paper(key)
# Find external papers
external_papers = []
for key, data in parser_data.items():
external_papers += data[1]
external_papers = set(external_papers)
# Add external papers safely
existing_external_papers = set(citation_network.external_papers.keys())
to_add = external_papers - existing_external_papers
for ex_paper_title in to_add:
citation_network.add_external_paper(ex_paper_title, format_name=False)
# Add citations safely
for key, data in parser_data.items():
# Get object
arxiv_paper_obj = citation_network.arxiv_papers[key]
# Get list of citations that still need to be added
to_add_arxiv_cites = set(data[0]) - set(arxiv_paper_obj.arxiv_citations)
to_add_external_cites = set(data[1]) - set(arxiv_paper_obj.external_citations)
# Add citations
for to_add_a_id in to_add_arxiv_cites:
arxiv_paper_obj.add_arxiv_citation(to_add_a_id)
for to_add_title in to_add_external_cites:
arxiv_paper_obj.add_external_citation(to_add_title, format_title=False)
log.info("Finished mering parsed data")
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests that the manage.py commands execute correctly.
These tests only verify that the commands execute and exit with a success code.
They are intended to catch import exceptions and similar problems; it is left
up to tests in other modules to verify that the functionality of each command
works correctly.
"""
import os
import signal
import subprocess
import tempfile
import time
import unittest
from django.db.models import get_models
from google.appengine.ext import db
from appengine_django.models import BaseModel
from appengine_django.models import ModelManager
from appengine_django.models import ModelOptions
from appengine_django.models import RegistrationTestModel
class CommandsTest(unittest.TestCase):
"""Unit tests for the manage.py commands."""
# How many seconds to wait for a command to exit.
COMMAND_TIMEOUT = 10
def runCommand(self, command, args=None, int_after=None, input=None):
"""Helper to run the specified command in a child process.
Args:
command: The name of the command to run.
args: List of command arguments to run the command with.
int_after: If set to a positive integer, SIGINT will be sent to the
running child process after this many seconds to cause an exit. This
should be less than the COMMAND_TIMEOUT value (10 seconds).
input: A string to write to stdin when the command starts. stdin is
closed after the string is written.
Raises:
This function does not return anything but will raise assertion errors if
the command does not exit successfully.
"""
if not args:
args = []
start = time.time()
int_sent = False
fd, tempname = tempfile.mkstemp()
infd = fd
if input:
infd = subprocess.PIPE
child = subprocess.Popen(["./manage.py", command] + args, stdin=infd,
stdout=fd, stderr=fd, cwd=os.getcwdu())
if input:
child.stdin.write(input)
child.stdin.close()
while 1:
rc = child.poll()
if rc is not None:
# Child has exited.
break
elapsed = time.time() - start
if int_after and int_after > 0 and elapsed > int_after and not int_sent:
# Sent SIGINT as requested, give child time to exit cleanly.
os.kill(child.pid, signal.SIGINT)
start = time.time()
int_sent = True
continue
if elapsed < self.COMMAND_TIMEOUT:
continue
# Command is over time, kill and exit loop.
os.kill(child.pid, signal.SIGKILL)
time.sleep(2) # Give time for the signal to be received.
break
# Check child return code
os.close(fd)
self.assertEquals(0, rc,
"%s did not return successfully (rc: %d): Output in %s" %
(command, rc, tempname))
os.unlink(tempname)
def testDiffSettings(self):
"""Tests the diffsettings command."""
self.runCommand("diffsettings")
def testDumpData(self):
"""Tests the dumpdata command."""
self.runCommand("dumpdata")
def testFlush(self):
"""Tests the flush command."""
self.runCommand("flush")
def testLoadData(self):
"""Tests the loaddata command."""
self.runCommand("loaddata")
def testReset(self):
"""Tests the reste command."""
self.runCommand("reset", ["appengine_django"])
def testRunserver(self):
"""Tests the runserver command."""
self.runCommand("runserver", int_after=2.0)
def testShell(self):
"""Tests the shell command."""
self.runCommand("shell", input="exit")
|
#! /usr/bin/env python3
import argparse
import datetime
import json
if __name__ == "__main__":
args = argparse.ArgumentParser()
args.add_argument("manifest", help="Path to app container Manifest")
m = args.parse_args().manifest
    print(m)
with open(m, 'r') as f:
manifest = json.load(f)
    print(json.dumps(manifest, indent=2))
try:
if len(manifest["dependencies"]) > 0:
manifest["dependencies"] = []
manifest["annotations"].append({
"name": "squash-date",
"value": "%s" % datetime.datetime.now()
})
        print(json.dumps(manifest, indent=2))
with open(m, 'w') as f:
json.dump(manifest, f)
except KeyError:
print "KeyError with dependencies"
print "No dependencies or already render/squashed"
|
# Set Matrix Zeroes
from typing import List
class Solution:
    # Record the rows/columns where 0 appears, then zero them out
def setZeroes_1(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
hor, ver = set(), set()
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if matrix[i][j] == 0:
                    hor.add(i)
                    ver.add(j)
for i in hor:
matrix[i] = [0] * len(matrix[0])
for i in ver:
for j in range(len(matrix)):
matrix[j][i] = 0
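# Brief usage sketch (added for illustration):
if __name__ == "__main__":
    grid = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
    Solution().setZeroes_1(grid)
    print(grid)  # expected: [[1, 0, 1], [0, 0, 0], [1, 0, 1]]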
|
from django.core.management.base import BaseCommand
from twiggy_goodies.django import LogMixin
from allmychanges.models import LightModerator
class Command(LogMixin, BaseCommand):
help = u"""Deletes stale light moderators from the database"""
def handle(self, *args, **options):
LightModerator.remove_stale_moderators()
|
from collections import defaultdict
#### test expressions
sample1 = "(A & B) & (F | E)"
sample2 = "(A & (B | C)) & (F | E)"
sample3 = "FAKE"
sample4 = "(((A & B) & (C & D)) | 5)"
sample5 = "(A & (B | C)) & (F | E)"
sample6 = "!(A & B) | (C | (D & E))"
sample7 = "!A & B"
sample8 = "!(A & B)"
sample9 = "!(A & B) & !C"
sample10 = "!(A & B) | !(C & D)"
sample11 = "(A | B | C) & (!A | D | E) & (F | G | H)"
sample12 = "(D & !E) | (E & !F)"
#### below variables are used to test sample 6 truth output for ExprTree
sample6__truthtable1 = defaultdict(list)
sample6__truthtable1["B"] = False
sample6__truthtable1["C"] = False
sample6__truthtable1["D"] = True
sample6__truthtable1["E"] = True
sample6__truthtable1_output = {}
sample6__truthtable1_output[" A & B ! "] = [] # undet
sample6__truthtable1_output[" C "] = False
sample6__truthtable1_output[" D & E "] = True
##### decisions: satisfiability
sample_sat1 = "!(A & !B) & C & !D & !(E & F & G) & !H"
sample_sat2 = "!(A & B & !C) & !(D & E) & (G & !H)"
##### syntactical test
syntact1 = "A & B & C & !A"
nonsyntact2 = "A & B & | C"
truthtable1 = defaultdict(list)
truthtable1["A"] = True
truthtable1["B"] = False
truthtable1["C"] = True
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
A library for generating all kinds of fake data.
"""
import mimesis
def fake_person():
person = mimesis.Person()
print(person.name())
print(person.full_name())
print(person.age())
if __name__ == "__main__":
fake_person()
|
import sys
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio import SeqIO
import psycopg2
import os
import subprocess
listFile = open(sys.argv[1])
outFile = open(sys.argv[2], 'w')
dbName = sys.argv[3]
table = sys.argv[4]
conn = psycopg2.connect("dbname={0}".format(dbName))
cur = conn.cursor()
records = []
for line in listFile:
cdsID = line.split()[0]
queryString = "SELECT id, seq FROM {0} WHERE id = '{1}'".format(table, cdsID)
cur.execute(queryString)
seq = cur.fetchone()[1]
record = SeqRecord(Seq(seq,
IUPAC.unambiguous_dna).translate(),
id=cdsID, name="",
description="")
records.append(record)
SeqIO.write(records, outFile, "fasta")
listFile.close()
outFile.close()
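# Usage sketch (illustrative only; the script, file, database and table names below are hypothetical):
#   python extract_translations.py cds_ids.txt proteins.fasta mydb cds_table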
|
import base64
import psycopg2
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import content_disposition
import mimetypes
class MailController(http.Controller):
_cp_path = '/mail'
@http.route('/mail/download_attachment', type='http', auth='user')
def download_attachment(self, model, id, method, attachment_id, **kw):
# FIXME use /web/binary/saveas directly
Model = request.registry.get(model)
res = getattr(Model, method)(request.cr, request.uid, int(id), int(attachment_id))
if res:
filecontent = base64.b64decode(res.get('base64'))
filename = res.get('filename')
content_type = mimetypes.guess_type(filename)
if filecontent and filename:
return request.make_response(
filecontent,
headers=[('Content-Type', content_type[0] or 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
return request.not_found()
@http.route('/mail/receive', type='json', auth='none')
def receive(self, req):
""" End-point to receive mail from an external SMTP server. """
dbs = req.jsonrequest.get('databases')
for db in dbs:
message = dbs[db].decode('base64')
try:
registry = openerp.registry(db)
with registry.cursor() as cr:
mail_thread = registry['mail.thread']
mail_thread.message_process(cr, SUPERUSER_ID, None, message)
except psycopg2.Error:
pass
return True
|
from enum import Enum
from benchmarking import benchmark
class MyEnum(Enum):
A = 1
B = 2
C = 3
@benchmark
def enums() -> None:
a = [MyEnum.A, MyEnum.B, MyEnum.C] * 10
n = 0
for i in range(100000):
for x in a:
if x is MyEnum.A:
n += 1
elif x is MyEnum.B:
n += 2
assert n == 3000000, n
|
from flask import Flask, redirect,render_template, request
import json
import view_model as vm
app = Flask(__name__)
@app.route("/")
def main_page():
return render_template("index.html",page_title="Pricer")
@app.route("/get_price",methods=['POST'])
def get_price():
dict_args = {}
dict_args['strategy'] = request.form.get("strategy","Vanilla")
dict_args['type'] = request.form.get("type","European")
dict_args['spot'] = request.form.get("spot",0)
dict_args['strike'] = request.form.get("strike",0)
dict_args['strike2'] = request.form.get("strike2",0)
dict_args['maturity'] = request.form.get("maturity")
dict_args['maturity2'] = request.form.get("maturity2",0)
dict_args['volatility'] = request.form.get("volatility",0.2)
dict_args['interest_rate'] = request.form.get("interest-rate",0)
    # The get_price() method of View_Model returns a render_template() call with the appropriate HTML page
return vm.View_Model(dict_args).get_price()
if __name__ == "__main__":
app.run(debug=True)
|
"""
与类和对象相关的一些BIF:
1.一个类被认为是其自身的子类
issubclass(class,classinfo)
2.classinfo可以是类对象组成的元组,只要class与其中任何一个候选类的子类,则返回True
issubclass(class,classinfo)
"""
class A:
pass
class B(A):
pass
print('--------------------------------------------')
print(issubclass(B, A))
print('--------------------------------------------')
print(issubclass(B, B))
print('--------------------------------------------')
print(issubclass(B, object))
print('--------------------------------------------')
class C:
pass
print(issubclass(B, C))
print('--------------------------------------------')
""""
isinstance(object,classinfo)
1.如果第一个参数不是对象,则永远返回False
2.如果第二个参数不是类或者由类对象组成的元组,会抛出一个TypeError异常
"""
b1 = B()
print(isinstance(b1, B))
print(isinstance(b1, A))
print(isinstance(b1, C))
print(isinstance(b1, (A, B, C)))
print('--------------------------------------------')
"""
hasattr(object,name)
attr = attribute 属性
"""
class C:
def __init__(self, x=0):
self.x = x
c1 = C()
print(hasattr(c1, 'x'))
print('--------------------------------------------')
"""
getattr(object,name[,default])
"""
print(getattr(c1, 'x'))
print(getattr(c1, 'y', 'The attribute you accessed does not exist....'))
print('--------------------------------------------')
"""
setattr(object,name,value)
设置指定属性
"""
setattr(c1, 'y', 'FishC')
print(getattr(c1, 'y', 'The attribute you accessed does not exist....'))
print('--------------------------------------------')
"""
delattr(object,name)
删除指定属性
delattr(c1,'y')
delattr(c1,'y')
"""
"""
property(fget=None,fset=None,fdel=None,doc=None)
通过属性设置属性,3个参数
"""
class C:
def __init__(self, size=10):
self.size = size
def getSize(self):
return self.size
def setSize(self, value):
self.size = value
def delSize(self):
del self.size
x = property(getSize, setSize, delSize)
c1 = C()
print(c1.getSize())
print(c1.x)
c1.x = 18
print(c1.x)
print(c1.getSize())
print('--------------------------------------------')
|
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2022, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The errors and warnings raised within and by Tanjun."""
from __future__ import annotations
__all__: list[str] = [
"CommandError",
"ConversionError",
"HaltExecution",
"FailedCheck",
"MissingDependencyError",
"NotEnoughArgumentsError",
"TooManyArgumentsError",
"ParserError",
"TanjunError",
]
import typing
if typing.TYPE_CHECKING:
from collections import abc as collections
class TanjunError(Exception):
"""The base class for all errors raised by Tanjun."""
__slots__ = ()
class HaltExecution(TanjunError):
"""Error raised while looking for a command in-order to end-execution early.
For the most part, this will be raised during checks in-order to prevent
other commands from being tried.
"""
__slots__ = ()
class MissingDependencyError(TanjunError):
"""Error raised when a dependency couldn't be found."""
__slots__ = ("message",)
message: str
"""The error's message."""
def __init__(self, message: str) -> None:
"""Initialise a missing dependency error.
Parameters
----------
message : str
The error message.
"""
self.message = message
class CommandError(TanjunError):
"""Error raised to end command execution."""
__slots__ = ("message",)
# None or empty string == no response
message: str
"""The response error message.
Tanjun will try to send the string message as a response.
"""
def __init__(self, message: str, /) -> None:
"""Initialise a command error.
Parameters
----------
message : str
String message which will be sent as a response to the message
that triggered the current command.
Raises
------
ValueError
Raised when the message is over 2000 characters long or empty.
"""
if len(message) > 2000:
raise ValueError("Error message cannot be over 2_000 characters long.")
elif not message:
raise ValueError("Response message must have at least 1 character.")
self.message = message
def __str__(self) -> str:
return self.message or ""
# TODO: use this
class InvalidCheck(TanjunError, RuntimeError): # TODO: or/and warning? # TODO: InvalidCheckError
"""Error raised as an assertion that a check will never pass in the current environment."""
__slots__ = ()
class FailedCheck(TanjunError, RuntimeError): # TODO: FailedCheckError
"""Error raised as an alternative to returning `False` in a check."""
__slots__ = ()
class ParserError(TanjunError, ValueError):
"""Base error raised by a parser or parameter during parsing.
.. note::
Expected errors raised by the parser will subclass this error.
"""
__slots__ = ("message", "parameter")
message: str
"""String message for this error.
.. note::
This may be used as a command response message.
"""
parameter: typing.Optional[str]
"""Name of the this was raised for.
.. note::
This will be `builtin.None` if it was raised while parsing the provided
message content.
"""
def __init__(self, message: str, parameter: typing.Optional[str], /) -> None:
"""Initialise a parser error.
Parameters
----------
message : str
String message for this error.
parameter : typing.Optional[str]
Name of the parameter which caused this error, should be `None` if not
applicable.
"""
self.message = message
self.parameter = parameter
def __str__(self) -> str:
return self.message
class ConversionError(ParserError):
"""Error raised by a parser parameter when it failed to converter a value."""
__slots__ = ("errors",)
errors: collections.Sequence[ValueError]
"""Sequence of the errors that were caught during conversion for this parameter."""
parameter: str
"""Name of the parameter this error was raised for."""
def __init__(self, message: str, parameter: str, /, errors: collections.Iterable[ValueError] = ()) -> None:
"""Initialise a conversion error.
Parameters
----------
        message : str
            String message for this error.
        parameter : str
            Name of the parameter this error was raised for.
        errors : collections.abc.Iterable[ValueError]
            An iterable of the source value errors which were raised during conversion.
"""
super().__init__(message, parameter)
self.errors = tuple(errors)
class NotEnoughArgumentsError(ParserError):
"""Error raised by the parser when not enough arguments are found for a parameter."""
__slots__ = ()
parameter: str
"""Name of the parameter this error was raised for."""
def __init__(self, message: str, parameter: str, /) -> None:
"""Initialise a not enough arguments error.
Parameters
----------
message : str
The error message.
        parameter : str
            Name of the parameter this error was raised for.
"""
super().__init__(message, parameter)
class TooManyArgumentsError(ParserError):
"""Error raised by the parser when too many arguments are found for a parameter."""
__slots__ = ()
parameter: str
"""Name of the parameter this error was raised for."""
def __init__(self, message: str, parameter: str, /) -> None:
"""Initialise a too many arguments error.
Parameters
----------
message : str
The error message.
        parameter : str
            Name of the parameter this error was raised for.
"""
super().__init__(message, parameter)
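# Minimal illustrative sketch (not part of the library): constructing and inspecting a
# ConversionError as a parser might raise it; the option name "count" is purely hypothetical.
if __name__ == "__main__":
    _error = ConversionError("Couldn't convert value", "count", errors=[ValueError("invalid literal")])
    print(str(_error), _error.parameter, _error.errors)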
|
import hashlib
import pytest
from mock import patch
from flask import url_for
from playhouse.test_utils import assert_query_count
from app import instance_keys, app as realapp
from auth.auth_context_type import ValidatedAuthContext
from data import model
from data.cache import InMemoryDataModelCache
from data.database import ImageStorageLocation
from endpoints.test.shared import conduct_call
from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
from test.fixtures import *
@pytest.mark.parametrize(
"method, endpoint", [("GET", "download_blob"), ("HEAD", "check_blob_exists"),]
)
def test_blob_caching(method, endpoint, client, app):
digest = "sha256:" + hashlib.sha256("a").hexdigest()
location = ImageStorageLocation.get(name="local_us")
model.blob.store_blob_record_and_temp_link("devtable", "simple", digest, location, 1, 10000000)
params = {
"repository": "devtable/simple",
"digest": digest,
}
user = model.user.get_user("devtable")
access = [{"type": "repository", "name": "devtable/simple", "actions": ["pull"],}]
context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
token = generate_bearer_token(
realapp.config["SERVER_HOSTNAME"], subject, context, access, 600, instance_keys
)
headers = {
"Authorization": "Bearer %s" % token,
}
# Run without caching to make sure the request works. This also preloads some of
# our global model caches.
conduct_call(
client, "v2." + endpoint, url_for, method, params, expected_code=200, headers=headers
)
with patch("endpoints.v2.blob.model_cache", InMemoryDataModelCache()):
# First request should make a DB query to retrieve the blob.
conduct_call(
client, "v2." + endpoint, url_for, method, params, expected_code=200, headers=headers
)
# Subsequent requests should use the cached blob.
with assert_query_count(0):
conduct_call(
client,
"v2." + endpoint,
url_for,
method,
params,
expected_code=200,
headers=headers,
)
@pytest.mark.parametrize(
"mount_digest, source_repo, username, expect_success",
[
# Unknown blob.
("sha256:unknown", "devtable/simple", "devtable", False),
# Blob not in repo.
("sha256:" + hashlib.sha256("a").hexdigest(), "devtable/complex", "devtable", False),
# Blob in repo.
("sha256:" + hashlib.sha256("b").hexdigest(), "devtable/complex", "devtable", True),
# No access to repo.
("sha256:" + hashlib.sha256("b").hexdigest(), "devtable/complex", "public", False),
# Public repo.
("sha256:" + hashlib.sha256("c").hexdigest(), "public/publicrepo", "devtable", True),
],
)
def test_blob_mounting(mount_digest, source_repo, username, expect_success, client, app):
location = ImageStorageLocation.get(name="local_us")
# Store and link some blobs.
digest = "sha256:" + hashlib.sha256("a").hexdigest()
model.blob.store_blob_record_and_temp_link("devtable", "simple", digest, location, 1, 10000000)
digest = "sha256:" + hashlib.sha256("b").hexdigest()
model.blob.store_blob_record_and_temp_link("devtable", "complex", digest, location, 1, 10000000)
digest = "sha256:" + hashlib.sha256("c").hexdigest()
model.blob.store_blob_record_and_temp_link(
"public", "publicrepo", digest, location, 1, 10000000
)
params = {
"repository": "devtable/building",
"mount": mount_digest,
"from": source_repo,
}
user = model.user.get_user(username)
access = [{"type": "repository", "name": "devtable/building", "actions": ["pull", "push"],}]
if source_repo.find(username) == 0:
access.append(
{"type": "repository", "name": source_repo, "actions": ["pull"],}
)
context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
token = generate_bearer_token(
realapp.config["SERVER_HOSTNAME"], subject, context, access, 600, instance_keys
)
headers = {
"Authorization": "Bearer %s" % token,
}
expected_code = 201 if expect_success else 202
conduct_call(
client,
"v2.start_blob_upload",
url_for,
"POST",
params,
expected_code=expected_code,
headers=headers,
)
if expect_success:
# Ensure the blob now exists under the repo.
model.blob.get_repo_blob_by_digest("devtable", "building", mount_digest)
else:
with pytest.raises(model.blob.BlobDoesNotExist):
model.blob.get_repo_blob_by_digest("devtable", "building", mount_digest)
|
"""
MIT License
Copyright (c) 2019 Denis Yarats
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import imageio
import os
import numpy as np
def make_dir(*path_parts):
dir_path = os.path.join(*path_parts)
try:
os.mkdir(dir_path)
except OSError:
pass
return dir_path
class VideoRecorder(object):
def __init__(self, root_dir, height=256, width=256, camera_id=0, fps=10):
self.save_dir = make_dir(root_dir, 'video') if root_dir else None
self.height = height
self.width = width
self.camera_id = camera_id
self.fps = fps
self.frames = []
def init(self, enabled=True):
self.frames = []
self.enabled = self.save_dir is not None and enabled
def record(self, env):
        # TODO(sff1019): Add dm-control to keep the experiments consistent
if self.enabled:
frame = env.render(mode='rgb_array')
self.frames.append(frame)
def save(self, file_name, logger=None, step=None):
if self.enabled:
path = os.path.join(self.save_dir, file_name)
imageio.mimsave(path,
self.frames,
fps=self.fps,
macro_block_size=None)
if self.enabled and logger is not None and step is not None:
# Reshape frames to match pytorch add_video
h, w, c = self.frames[0].shape
video_array = np.zeros(shape=(len(self.frames), 3, h, w))
for idx, frame in enumerate(self.frames):
video_array[idx, :, :, :] = np.transpose(
frame, (2, 0, 1)).astype('uint8')
logger.log_video(f'eval/{step}',
video_array,
step,
log_frequency=step)
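# Minimal usage sketch (illustrative only; assumes a gym-like `env` whose render() supports
# mode='rgb_array', which is what record() expects):
def _example_record_episode(env, root_dir='./runs', num_steps=100):
    recorder = VideoRecorder(root_dir)
    recorder.init(enabled=True)
    for _ in range(num_steps):
        env.step(env.action_space.sample())
        recorder.record(env)
    recorder.save('episode_0.mp4')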
|
from email.utils import formataddr
import requests
from CTFd.utils import get_app_config, get_config
def sendmail(addr, text, subject):
ctf_name = get_config("ctf_name")
mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR")
mailfrom_addr = formataddr((ctf_name, mailfrom_addr))
mailgun_base_url = get_config("mailgun_base_url") or get_app_config(
"MAILGUN_BASE_URL"
)
mailgun_api_key = get_config("mailgun_api_key") or get_app_config("MAILGUN_API_KEY")
try:
r = requests.post(
mailgun_base_url + "/messages",
auth=("api", mailgun_api_key),
data={
"from": mailfrom_addr,
"to": [addr],
"subject": subject,
"text": text,
},
timeout=1.0,
)
except requests.RequestException as e:
return (
False,
"{error} exception occurred while handling your request".format(
error=type(e).__name__
),
)
if r.status_code == 200:
return True, "Email sent"
else:
return False, "Mailgun settings are incorrect"
|
import pyvista
pyvista.global_theme.font.label_size = 20
|
from urllib.parse import urlencode
from openpyxl import Workbook
import datetime
import json
import requests
now = datetime.datetime.now()
areaUrls = [
{'url': 'https://hakwon.sen.go.kr', 'areaNm': '서울'},
{'url': 'https://hakwon.pen.go.kr', 'areaNm': '부산'},
{'url': 'https://hakwon.dge.go.kr', 'areaNm': '대구'},
{'url': 'https://hakwon.ice.go.kr', 'areaNm': '인천'},
{'url': 'https://hakwon.gen.go.kr', 'areaNm': '광주'},
{'url': 'https://hakwon.dje.go.kr', 'areaNm': '대전'},
{'url': 'https://hakwon.use.go.kr', 'areaNm': '울산'},
{'url': 'https://hakwon.sje.go.kr', 'areaNm': '세종시'},
{'url': 'https://hakwon.goe.go.kr', 'areaNm': '경기도'},
{'url': 'https://hakwon.kwe.go.kr', 'areaNm': '강원도'},
{'url': 'https://hakwon.cbe.go.kr', 'areaNm': '충북'},
{'url': 'https://hakwon.cne.go.kr', 'areaNm': '충남'},
{'url': 'https://hakwon.jbe.go.kr', 'areaNm': '전북'},
{'url': 'https://hakwon.jne.go.kr', 'areaNm': '전남'},
{'url': 'https://hakwon.gbe.kr', 'areaNm': '경북'},
{'url': 'https://hakwon.gne.go.kr', 'areaNm': '경남'},
{'url': 'https://hakwon.jje.go.kr', 'areaNm': '제주'},
]
zoneCodes = []
searchParams = {}
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
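# Short usage sketch (added for illustration): drive the bar from a simple loop, mirroring how
# hakwondata() below calls it once before and then inside its per-record loop.
def _progress_bar_demo(total=20):
    printProgressBar(0, total, prefix='Progress:', suffix='Complete', length=50)
    for i in range(total):
        printProgressBar(i + 1, total, prefix='Progress:', suffix='Complete', length=50)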
def hakwondata(zcode, neisUrl, areaNm, cookies):
book = Workbook()
urllist = []
excelList = []
teacherList = []
totalPage = 1
zoneNm = ""
searchParams["searchZoneCode"] = zcode
## to get total count
response = requests.post(neisUrl + '/scs_ica_cr91_005.ws', data=json.dumps(searchParams), cookies=cookies)
jsonObj = json.loads(response.content)
## total count
totalCnt = int(jsonObj["resultSVO"]["totalCount"])
print(totalCnt, "records ")
if totalCnt > 1000:
totalPage, therest = divmod(totalCnt, 1000)
if therest > 0:
totalPage = totalPage + 1
searchParams["pageSize"] = 1000
elif totalCnt < 1:
print("No records found")
return
else:
totalPage = 1
searchParams["pageSize"] = str(totalCnt)
print("Total ", totalPage, " pages...")
zoneNm = findZoneName(zcode)
for pageIndex in range(0, totalPage, 1):
print(pageIndex + 1, " page crawling... ")
## post again
searchParams["pageIndex"] = str(pageIndex + 1)
response = requests.post(neisUrl + '/scs_ica_cr91_005.ws', data=json.dumps(searchParams), cookies=cookies)
jsonObj = json.loads(response.content)
hakwonlist = jsonObj["resultSVO"]["hesIcaCr91M00DVO"]
seq = (pageIndex) * int(searchParams["pageSize"]) + 1
teacher_params = {}
curAcaAsnum = ""
progressIndex = 0
maxProgressIndex = len(hakwonlist)
printProgressBar(progressIndex, maxProgressIndex, prefix='Progress:', suffix='Complete', length=50)
for onehakwon in hakwonlist:
leSbjtNm = ""
if isinstance(onehakwon["leSbjtNm"], list):
leSbjtNm = onehakwon["leSbjtNm"][0]
else:
leSbjtNm = onehakwon["leSbjtNm"]
lessonPeriod = onehakwon["leMms"] + '개월 ' + onehakwon["lePrdDds"] + '일'
excelData = (strNow, seq, onehakwon["zoneNm"], onehakwon["acaNm"], onehakwon["gmNm"], leSbjtNm, onehakwon["faTelno"], onehakwon["totalJuso"], onehakwon["toforNmprFgr"], lessonPeriod,
onehakwon["totLeTmMmFgr"], onehakwon["thccSmtot"], onehakwon["thccAmt"], onehakwon["etcExpsSmtot"])
if curAcaAsnum != onehakwon["acaAsnum"]:
curAcaAsnum = onehakwon["acaAsnum"]
                ## Extract instructor information
teacher_params["juOfcdcCode"] = onehakwon["juOfcdcCode"]
teacher_params["acaAsnum"] = onehakwon["acaAsnum"]
teacher_params["gubunCode"] = searchParams["searchGubunCode"]
teacherList = []
response = requests.post(neisUrl + '/hes_ica_cr91_006.ws', data=json.dumps(teacher_params), cookies=cookies)
jsonObj = json.loads(response.content)
teachers = jsonObj["resultSVO"]["teacherDVOList"]
for oneteacher in teachers:
teacherList.append(oneteacher["fouKraName"])
teacherTuple = tuple(teacherList)
excelList.append(excelData + teacherTuple)
seq = seq + 1
progressIndex = progressIndex + 1
printProgressBar(progressIndex, maxProgressIndex, prefix='Progress:', suffix='Complete', length=50)
## progress bar end
# print("\n")
    ## Save results to Excel
sheet = book.active
sheet.append(('크롤링일', '순번', '지역', '학원명', '교습과정', '교습과목', '전화번호', '주소', '정원', '교습기간', '총교습시간(분)', '교습비 합계', '교습비', '기타경비', '강사'))
for row in excelList:
sheet.append(row)
filename = './data/hakwoncrawling' + '-' + areaNm + '-' + zoneNm
if searchParams["searchClassName"] != '':
filename = filename + '-' + searchParams["searchClassName"]
if searchParams["searchGubunCode"] == '1':
filename = filename + '-' + '학원'
elif searchParams["searchGubunCode"] == '2':
filename = filename + '-' + '교습소'
book.save(filename + '-' + strNow + '.xlsx')
## Find the zone name for a zone code
def findZoneName(zonecode):
for onecode in zoneCodes:
if onecode["zoneCode"] == zonecode:
return onecode["zoneNm"]
return ""
## Build the list of sub-zones for the selected education office
def getSearchZoneCodeList(areaIndex, cookies):
params={}
response = requests.post(areaUrls[areaIndex]["url"] + '/scs_ica_cr91_001.ws', data=json.dumps(params), cookies=cookies)
jsonObj = json.loads(response.content)
if jsonObj["result"]["status"] == "success":
searchZoneCodeList = jsonObj["resultSVO"]["searchZoneCodeList"]
for zonecode in searchZoneCodeList:
zoneCodes.append({"zoneCode": zonecode["zoneCode"], "zoneNm": zonecode["zoneNm"]})
return 1
else:
return 0
def readSearchConfig():
with open('./config.json', 'rt', encoding='UTF-8') as json_file:
p = json.load(json_file)
searchParams["pageIndex"] = p["pageIndex"]
searchParams["pageSize"] = p["pageSize"]
searchParams["checkDomainCode"] = p["checkDomainCode"]
searchParams["juOfcdcCode"] = p["juOfcdcCode"]
searchParams["acaAsnum"] = p["acaAsnum"]
searchParams["gubunCode"] = p["gubunCode"]
searchParams["searchYn"] = p["searchYn"]
searchParams["searchGubunCode"] = p["searchGubunCode"]
searchParams["searchName"] = p["searchName"]
searchParams["searchZoneCode"] = p["searchZoneCode"]
searchParams["searchKindCode"] = p["searchKindCode"]
searchParams["searchTypeCode"] = p["searchTypeCode"]
searchParams["searchCrseCode"] = p["searchCrseCode"]
searchParams["searchCourseCode"] = p["searchCourseCode"]
searchParams["searchClassName"] = p["searchClassName"]
if __name__ == "__main__":
strNow = now.strftime('%y%m%d')
neisUrl = 'https://www.neis.go.kr'
areaIndex = "0"
    # Load search conditions
    # Pre-load the corresponding parameters from config.json
readSearchConfig()
    # Hakwon (private academy) data collection in progress
    # 2020.2.25 방순호
# for zcode in
while int(areaIndex) >= 0:
## Initialize
searchParams["pageIndex"] = "1"
searchParams["pageSize"] = "1"
for aindex in range(0,len(areaUrls),1):
print(str(aindex) + " : " + areaUrls[aindex]["areaNm"])
areaIndex = input("교육청을 선택해 주세요 (종료 'q') : ")
if areaIndex == 'q':
exit(0)
        ## Store cookie information
# request = Request(neisUrl)
params = {}
# params["paramJson"] = "%7B%7D"
response = requests.get(areaUrls[int(areaIndex)]["url"] + '/edusys.jsp?page=scs_m80000', params={})
cookies = response.cookies
        ## Load the zone list
zoneCodes = []
if getSearchZoneCodeList(int(areaIndex), cookies) == 0:
print("지역을 불러오는 도중 오류가 발생했습니다.")
continue
zoneIndex = ""
while zoneIndex == "":
for zoneCode in zoneCodes:
print(zoneCode["zoneCode"] + " : " + zoneCode["zoneNm"])
zoneIndex = input("원하시는 지역 번호를 입력해 주세요 (종료 'q') : ")
if zoneIndex == 'q':
exit(0)
if zoneIndex.strip() != "":
break
searchGubun = ""
while searchGubun == "":
searchGubun = input("1 - 학원, 2 - 교습소 (종료 'q') : ")
if searchGubun == 'q':
exit(0)
if searchGubun == '1' or searchGubun == '2':
searchParams["searchGubunCode"] = searchGubun
break
else:
searchGubun = ""
searchWord = input("원하시는 검색어를 입력해 주세요 (없으면 걍 엔터) : ")
searchParams["searchClassName"] = searchWord.strip()
hakwondata(zoneIndex, areaUrls[int(areaIndex)]["url"], areaUrls[int(areaIndex)]["areaNm"], cookies)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from conans import tools
from conans.client.tools.win import get_cased_path
from conans.test.utils.test_files import temp_folder
import os
import platform
from conans.util.files import mkdir
class GetCasedPath(unittest.TestCase):
@unittest.skipUnless(platform.system() == "Windows", "Requires Windows")
def test_case_existing(self):
folder = get_cased_path(temp_folder())
p1 = os.path.join(folder, "MyFolder", "Subfolder")
mkdir(p1)
self.assertEqual(p1, get_cased_path(p1)) # Idempotent
self.assertEqual(p1, get_cased_path(os.path.join(folder, "myfolder", "subfolder")))
def test_case_not_existing(self):
current_dir = get_cased_path(os.getcwd())
non_existing_path = os.path.join(current_dir, "this", "Path", "does", "NOT", "Exists")
p = get_cased_path(non_existing_path) # If not exists from the root, returns as is
self.assertEqual(p, non_existing_path)
@unittest.skipUnless(platform.system() == "Windows", "Requires Windows")
def test_case_partial_exists(self):
folder = get_cased_path(temp_folder())
p1 = os.path.join(folder, "MyFolder", "Subfolder")
mkdir(p1)
non_existing_path = os.path.join(folder, "myfolder", "subfolder", "not-existing")
# The first part of the path will be properly cased.
self.assertEqual(os.path.join(p1, "not-existing"),
get_cased_path(non_existing_path))
class UnixPathTest(unittest.TestCase):
def test_msys_path(self):
self.assertEqual('/c/windows/system32', tools.unix_path('C:\\Windows\\System32',
path_flavor=tools.MSYS2))
def test_cygwin_path(self):
self.assertEqual('/cygdrive/c/windows/system32', tools.unix_path('C:\\Windows\\System32',
path_flavor=tools.CYGWIN))
def test_wsl_path(self):
self.assertEqual('/mnt/c/Windows/System32', tools.unix_path('C:\\Windows\\System32',
path_flavor=tools.WSL))
def test_sfu_path(self):
self.assertEqual('/dev/fs/C/windows/system32', tools.unix_path('C:\\Windows\\System32',
path_flavor=tools.SFU))
|
import random
import secrets
import string
import typing
from dataclasses import dataclass, field
from morpfw.crud import Schema
def current_userid(request, data, model):
if data["userid"] is None:
data["userid"] = request.identity.userid
return data["userid"]
def generate_identity(request, data, model) -> str:
if data["api_identity"]:
return data["api_identity"]
return secrets.token_urlsafe(32)
@dataclass
class APIKeySchema(Schema):
userid: typing.Optional[str] = field(
default=None,
metadata={"format": "uuid", "index": True, "compute_value":
current_userid, 'editable': False, 'initializable': False},
)
name: typing.Optional[str] = field(default=None, metadata={"required": True})
api_identity: typing.Optional[str] = field(
default=None, metadata={"compute_value": generate_identity, "index":
True, 'initializable': False, 'editable': False},
)
api_secret: typing.Optional[str] = field(
default=None, metadata={"editable": False, "hidden": True,
'initializable':False}
)
|
import urllib
from googleapiclient import discovery
from PIL import Image
class ImageSearch(object):
CUSTOM_SEARCH_API = 'AIzaSyBFoF4sDsSj6FV8O-cYsyHbU9stfIrACJg'
MENU_PICTURE_CSE = '000057874177480001711:2ywzhtb3u6q'
def __init__(self):
self.service = discovery.build('customsearch',
'v1',
developerKey=self.CUSTOM_SEARCH_API)
def Search(self, text, num=5):
response = self.service.cse().list(q=text,
cx=self.MENU_PICTURE_CSE,
searchType='image',
safe='high',
num=num).execute()
image_link_list = []
if not response or 'items' not in response:
return None
for item in response['items']:
            if 'link' in item and 'image' in item:
image_link_list.append(item['link'])
return image_link_list
def main():
num = 5
thumbnail_size = (400, 300)
background = Image.new('RGBA', (thumbnail_size[0], thumbnail_size[1] * num),
(255, 255, 255, 255))
image_search = ImageSearch()
image_link_list = image_search.Search('grilled steak', num=num)
for i, image_link in enumerate(image_link_list):
tmp_image_file = '/tmp/img%d' % i
urllib.urlretrieve(image_link, tmp_image_file)
im = Image.open(tmp_image_file)
im.thumbnail(thumbnail_size)
offset = (0, thumbnail_size[1] * i)
background.paste(im, offset)
background.show()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import telnetlib
import time
TELNET_PORT = 23
TELNET_TIMEOUT = 6
def main():
ip_addr = "184.105.247.70"
username = "pyclass"
password = "88newclass"
telnet_conn = telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
output = telnet_conn.read_until("sername:", TELNET_TIMEOUT)
telnet_conn.write(username + '\n')
output = telnet_conn.read_until("ssword", TELNET_TIMEOUT)
telnet_conn.write(password + '\n')
time.sleep(2)
output = telnet_conn.read_very_eager()
telnet_conn.write("show ip interface brief" + '\n')
time.sleep(2)
output = telnet_conn.read_very_eager()
    print(output)
telnet_conn.close()
if __name__ == "__main__":
main()
|
# Copyright 2013 mysqlapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from unittest import TestCase
from django.conf import settings
from django.test.utils import override_settings
from mysqlapi.api.management.commands import s3
import mock
class S3TestCase(TestCase):
@override_settings(S3_ACCESS_KEY="access", S3_SECRET_KEY="secret")
def test_connection_should_get_keys_from_settings(self):
access = settings.S3_ACCESS_KEY
secret = settings.S3_SECRET_KEY
with mock.patch("boto.s3.connection.S3Connection") as s3con:
s3.connect()
s3con.assert_called_with(access, secret)
@override_settings(S3_BUCKET="bucket")
def test_get_buckets_from_settings(self):
bucket = settings.S3_BUCKET
with mock.patch("boto.s3.connection.S3Connection") as s3con:
conn = mock.Mock()
s3_instance = s3con.return_value
s3_instance.return_value = conn
s3.bucket()
s3_instance.get_bucket.assert_called_with(bucket)
def test_last_key(self):
m = "mysqlapi.api.management.commands.s3.bucket"
with mock.patch(m) as bucket_mock:
key = mock.Mock()
key.get_contents_as_string.return_value = "last_key"
bucket = mock.Mock()
bucket.get_key.return_value = key
bucket_mock.return_value = bucket
self.assertEqual("last_key", s3.last_key())
def test_store_data(self):
with mock.patch("mysqlapi.api.management.commands.s3.bucket"):
with mock.patch("boto.s3.key.Key") as Key:
key = Key.return_value
s3.store_data("data")
key.set_contents_from_string.assert_any_call("data")
def test_store_data_should_use_uuid_in_key_name(self):
with mock.patch("mysqlapi.api.management.commands.s3.bucket"):
with mock.patch("boto.s3.key.Key") as Key:
key = Key.return_value
with mock.patch("uuid.uuid4"):
s3.store_data("data")
key.set_contents_from_string.assert_called_with(Key().name)
def test_store_data_should_store_last_key(self):
with mock.patch("mysqlapi.api.management.commands.s3.bucket"):
with mock.patch("uuid.uuid4") as uuid4:
uuid4.return_value = mock.Mock(hex="uuid")
key = s3.store_data("data")
self.assertEqual("uuid", key.name)
def test_get_data(self):
m = "mysqlapi.api.management.commands.s3.bucket"
with mock.patch(m) as bucket_mock:
key = mock.Mock()
key.get_contents_as_string.return_value = "last_key"
bucket = mock.Mock()
bucket.get_key.return_value = key
bucket_mock.return_value = bucket
self.assertEqual("last_key", s3.get_data())
|
"""Special addresses in hex values"""
IPV4_LOCALHOST = 0x7F_00_00_01 # 127.0.0.1, or 2130706433
IPV6_LOCALHOST = 0x1 # ::1
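# A minimal sanity check (added for illustration, not part of the original
# constants): the stdlib ipaddress module maps these integers back to the
# dotted/colon forms noted in the comments above.
if __name__ == "__main__":
    import ipaddress
    assert str(ipaddress.IPv4Address(IPV4_LOCALHOST)) == "127.0.0.1"
    assert str(ipaddress.IPv6Address(IPV6_LOCALHOST)) == "::1"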
|
from __future__ import absolute_import, unicode_literals
import unittest
from draftjs_exporter.error import ConfigException
from draftjs_exporter.options import Options
class TestOptions(unittest.TestCase):
def test_str(self):
self.assertEqual(str(Options('unordered-list-item', 'li')), '<Options unordered-list-item li {} None None>')
def test_eq(self):
self.assertEqual(Options('unordered-list-item', 'li'), Options('unordered-list-item', 'li'))
def test_not_eq(self):
self.assertNotEqual(Options('unordered-list-item', 'li'), Options('unordered-list-item', 'p'))
def test_for_block_full(self):
self.assertEqual(Options.for_block({'unordered-list-item': 'li'}, 'unordered-list-item'), Options('unordered-list-item', 'li'))
def test_for_block_half(self):
self.assertEqual(Options.for_block({'unordered-list-item': 'li'}, 'unordered-list-item'), Options('unordered-list-item', 'li'))
def test_for_block_simplest(self):
self.assertEqual(Options.for_block({'unordered-list-item': 'li'}, 'unordered-list-item'), Options('unordered-list-item', 'li'))
def test_for_block_uses_fallback(self):
self.assertEqual(Options.for_block({'header-one': 'h1', 'fallback': 'div'}, 'header-two'), Options('header-two', 'div'))
def test_for_block_raises_missing_type(self):
with self.assertRaises(ConfigException):
Options.for_block({'header-one': 'h1'}, 'header-two')
def test_for_block_raises_missing_element(self):
with self.assertRaises(ConfigException):
Options.for_block({'header-one': {}}, 'header-one')
def test_for_style_full(self):
self.assertEqual(Options.for_style({'ITALIC': 'em'}, 'ITALIC'), Options('ITALIC', 'em'))
def test_for_style_half(self):
self.assertEqual(Options.for_style({'ITALIC': 'em'}, 'ITALIC'), Options('ITALIC', 'em'))
def test_for_style_simplest(self):
self.assertEqual(Options.for_style({'ITALIC': 'em'}, 'ITALIC'), Options('ITALIC', 'em'))
def test_for_style_uses_fallback(self):
self.assertEqual(Options.for_style({'BOLD': 'strong', 'FALLBACK': 'span'}, 'CODE'), Options('CODE', 'span'))
def test_for_style_raises_missing_type(self):
with self.assertRaises(ConfigException):
Options.for_style({'BOLD': 'strong'}, 'CODE')
def test_for_style_raises_missing_element(self):
with self.assertRaises(ConfigException):
Options.for_style({'BOLD': {}}, 'BOLD')
def test_for_entity_full(self):
self.assertEqual(Options.for_entity({'HORIZONTAL_RULE': 'hr'}, 'HORIZONTAL_RULE'), Options('HORIZONTAL_RULE', 'hr'))
def test_for_entity_half(self):
self.assertEqual(Options.for_entity({'HORIZONTAL_RULE': 'hr'}, 'HORIZONTAL_RULE'), Options('HORIZONTAL_RULE', 'hr'))
def test_for_entity_simplest(self):
self.assertEqual(Options.for_entity({'HORIZONTAL_RULE': 'hr'}, 'HORIZONTAL_RULE'), Options('HORIZONTAL_RULE', 'hr'))
def test_for_entity_uses_fallback(self):
self.assertEqual(Options.for_entity({'HORIZONTAL_RULE': 'hr', 'FALLBACK': 'div'}, 'TEST'), Options('TEST', 'div'))
def test_for_entity_raises_missing_type(self):
with self.assertRaises(ConfigException):
Options.for_entity({'HORIZONTAL_RULE': 'hr'}, 'TEST')
def test_for_entity_raises_missing_element(self):
with self.assertRaises(ConfigException):
Options.for_entity({'HORIZONTAL_RULE': {}}, 'HORIZONTAL_RULE')
|
from pathlib import Path
from typing import Union, List
import torch
import torch.nn.functional as F
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2PreTrainedModel
from transformers.generation_utils import top_k_top_p_filtering, calc_banned_bad_words_ids
from utils import utils
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
def adjust_length_to_model(length, max_sequence_length):
if length < 0 < max_sequence_length:
length = max_sequence_length
elif 0 < max_sequence_length < length:
length = max_sequence_length # No generation bigger than model size
elif length < 0:
length = MAX_LENGTH # avoid infinite loop
return length
# TODO: convert to HuggingFace pipeline
class GPT2Generation:
STOP_TOKEN = "<|endoftext|>"
def __init__(self, model: Union[str, Path, GPT2PreTrainedModel] = 'gpt2', tokenizer: str = 'gpt2', seed: int = 42):
# Set up device
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
utils.set_seed(seed, n_gpu)
# Set up model
if isinstance(model, Path) or isinstance(model, str):
model = GPT2LMHeadModel.from_pretrained(str(model))
self.model = model.to(self.device)
# Set up tokenizer
# IMPORTANT: Note that setting the pad token like this in the constructor gives the pad_token the
# pad_token_id = 50256, which normally belongs to the <EOS> token_id in GPT2. This is a very ugly
# way that works at the moment of setting the pad_token_id to the <EOS> token that is already
# included in the vocab size.
self.tokenizer = GPT2Tokenizer.from_pretrained(tokenizer, pad_token=self.STOP_TOKEN)
assert self.tokenizer.eos_token_id == self.tokenizer.pad_token_id
def __repr__(self):
return f'<GPT2Generator model_name_or_path="{self.model}">'
def __call__(self, *args, **kwargs):
return self.generate(*args, **kwargs)
def generate(self,
prompt: Union[str, List[str]],
max_len: int = 20,
sample: bool = True,
k: int = 0,
p: float = 0.9,
temperature: float = 1.0,
bad_words_ids: List[List[int]] = None,
**model_kwargs) -> List[str]:
if isinstance(prompt, str):
prompt = [prompt]
encodings_dict = self.tokenizer.batch_encode_plus(prompt, pad_to_max_length=True, return_tensors='pt')
input_ids = encodings_dict['input_ids'].to(self.device)
attention_mask = encodings_dict['attention_mask'].to(self.device)
batch_size, input_seq_len = input_ids.shape
position_ids = attention_mask.cumsum(dim=1) - 1
unfinished_sents = torch.ones(batch_size, dtype=torch.long, device=self.device)
self.model.eval()
with torch.no_grad():
for step in range(max_len):
logits, past = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids,
**model_kwargs)
# in the first decoding step, we want to use the 'real' last position for each sentence
if step == 0:
last_non_masked_idx = torch.sum(attention_mask, dim=1) - 1
next_token_logits = logits[range(batch_size), last_non_masked_idx, :]
else:
next_token_logits = logits[:, -1, :]
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
# TODO: use a vectorized operation
for batch_idx in range(batch_size):
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf")
if sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
# Top-p/top-k filtering
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=k, top_p=p)
# Sample
probs = F.softmax(next_token_logits, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
# Greedy decoding
next_tokens = torch.argmax(next_token_logits, dim=-1)
# either append a padding token here if <EOS> has been seen or append next token
tokens_to_add = next_tokens * unfinished_sents + self.tokenizer.pad_token_id * (1 - unfinished_sents)
# this updates which sentences have not seen an EOS token so far
# if one EOS token was seen the sentence is finished
eos_in_sents = tokens_to_add == self.tokenizer.eos_token_id
unfinished_sents.mul_((~eos_in_sents).long())
# stop when there is an EOS in each sentence
if unfinished_sents.max() == 0:
break
# Update input_ids, attention_mask and position_ids
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
attention_mask = torch.cat([attention_mask, attention_mask.new_ones((batch_size, 1))], dim=1)
position_ids = torch.cat([position_ids, (position_ids[:, -1] + 1).unsqueeze(-1)], dim=1)
decoded_outputs = [self.tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for output in input_ids[:, input_seq_len:]]
return decoded_outputs
def generate_multiple(self,
prompt: str,
max_len: int = 20,
temperature: float = 1.0,
k: int = 0,
p: float = 0.9,
num_return_sequences: int = 1,
sample: bool = True,
repetition_penalty: float = 1.0):
max_len = adjust_length_to_model(max_len, max_sequence_length=self.model.config.max_position_embeddings)
encoded_prompt = self.tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
encoded_prompt = encoded_prompt.to(self.device)
prompt_len = len(encoded_prompt[0])
output_sequences = self.model.generate(
input_ids=encoded_prompt,
max_length=max_len + prompt_len,
temperature=temperature,
top_k=k,
top_p=p,
repetition_penalty=repetition_penalty,
do_sample=sample,
num_return_sequences=num_return_sequences,
)
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
decoded_outputs = []
for output in output_sequences:
output = output[prompt_len:]
try:
stop_index = [i for i, x in enumerate(output) if x == self.tokenizer.eos_token_id][0]
except IndexError:
stop_index = None
output = output[:stop_index]
decoded_outputs.append(self.tokenizer.decode(output, clean_up_tokenization_spaces=True))
return decoded_outputs
def test_generate():
generator = GPT2Generation()
prompt = [
'<|endoftext|>in this paper we',
'<|endoftext|>we are trying to',
'<|endoftext|>The purpose of this workshop is to check whether we can'
]
out = generator.generate(prompt)
print(*out, sep='\n')
def test_generate_multiple():
generator = GPT2Generation()
prompt = 'in this paper we'
out = generator.generate_multiple(prompt)
print(*out, sep='\n')
|
larg = float(input('Wall width: '))
alt = float(input('Wall height: '))
area = larg * alt
tinta = area / 2
print('Your wall measures {} x {}, so its area is {:.2f} m².'.format(larg, alt, area))
print('To paint this wall you will need {} litres of paint.'.format(tinta))
|
# coding=utf-8
#-----------------------------------------------------------
# IMPORTS
#-----------------------------------------------------------
import language
import messages
from entities import Entity
#-----------------------------------------------------------
# CLASSES
#-----------------------------------------------------------
class Light(Entity):
def __init__(self):
super(Light, self).__init__()
self.is_on = False
def switch_on(self):
self.is_on = True
def switch_off(self):
self.is_on = False
class LightCommands(Entity):
def __init__(self):
super(LightCommands, self).__init__()
self.on_message('player_command', self.__player_command,
filter=messages.for_actors_with_item_of_class(Light))
def __player_command(self, player, command):
args = command.split(' ')
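        # Swedish command words: 'tänd' ("light") and 'släck' ("extinguish");
        # the player-facing messages below are likewise in Swedish.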
if args[0] == 'tänd':
light = player.inventory.find_best_match(' '.join(args[1:]))
if not isinstance(light, Light):
player.text('Tänd vad?')
return
if light.is_on:
light_desc = light.get_description(indefinite=False)
player.text(language.sentence('{} är redan tänd.', light_desc))
return
light.switch_on()
player.emote('tände', light)
elif args[0] == 'släck':
light = player.inventory.find_best_match(' '.join(args[1:]))
if not isinstance(light, Light):
player.text('Släck vad?')
return
if not light.is_on:
light_desc = light.get_description(indefinite=False)
player.text(language.sentence('{} är redan släckt.', light_desc))
return
light.switch_off()
player.emote('släckte', light)
LightCommands()
|
from django.contrib import admin
from moduleevaluation.files.models import Document
admin.site.register(Document)
# Register your models here.
|
with open("day-10.txt") as f:
nav_sys = f.read().rstrip().splitlines()
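# Syntax scoring: scan each line with a stack of opening brackets; the first
# mismatched closing bracket adds its point value to the total and the line is abandoned.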
pairs = {
")": "(",
"]": "[",
"}": "{",
">": "<",
}
points = {
")": 3,
"]": 57,
"}": 1197,
">": 25137,
}
score = 0
for line in nav_sys:
brackets = []
for char in line:
if char in pairs:
if brackets and pairs[char] == brackets[-1]:
brackets.pop()
else:
score += points[char]
break
else:
brackets.append(char)
print(score)
|
#!/usr/bin/python
#from gpiozero import Button
import signal
import os, sys
import time
from datetime import datetime
#import uinput
from evdev import uinput, UInput, ecodes as e
import RPi.GPIO as gpio
#from smbus import SMBus
DEBUG = False
Debounce = 0.01
R_slider = 4 # uinput.KEY_VOLUMEUP, Right slider
L_slider = 27 # uinput.KEY_VOLUMEDOWN, Left slider
C_slider = 17 # uinput.KEY_MUTE, Click slider
Turbo = 5 # uinput.KEY_T, Alt key/turbo button, center Top left
Mode = 26 # uinput.BTN_MODE, Mode/clear button , center top right
Start = 20 # uinput.BTN_START, Start Button, center bottom left
Select = 6 # uinput.BTN_SELECT, Select Button, center bottom right
Btn_A = 1 # uinput.BTN_A, A Button, right bottom
Btn_B = 16 # uinput.BTN_B, B Button, right right
Btn_X = 24 # uinput.BTN_X, X Button, right top
Btn_Y = 12 # uinput.BTN_Y, Y Button, right down
Btn_U = 11 # uinput.BTN_DPAD_UP, Up Button, left top
Btn_L = 9 # uinput.BTN_DPAD_LEFT, Left Button, left left
Btn_R = 0 # uinput.BTN_DPAD_RIGHT, Right Button, left right
Btn_D = 13 # uinput.BTN_DPAD_DOWN, Down Button, left down
Btn_LT = 10 # uinput.BTN_TL, Left Trigger
Btn_RT = 23 # uinput.BTN_TR, Right Trigger
buttonList = [R_slider, L_slider, C_slider, Turbo, Mode, Start, Select,
Btn_A, Btn_B, Btn_X, Btn_Y, Btn_U,
Btn_L, Btn_R, Btn_D, Btn_LT, Btn_RT]
keyList = {
R_slider: e.KEY_VOLUMEUP, # Right slider
L_slider: e.KEY_VOLUMEDOWN, # Left slider
    C_slider: e.KEY_MUTE, # Click slider
Turbo: e.KEY_C, # Alt key/turbo button, center Top left
Mode: e.KEY_V, # Mode/clear button , center top right
Start: e.KEY_ENTER, # Start Button, center bottom left
Select: e.KEY_SPACE, # Select Button, center bottom right
Btn_A: e.KEY_LEFTCTRL, # A Button, right bottom
Btn_B: e.KEY_LEFTALT, # B Button, right right
Btn_X: e.KEY_Z, # X Button, right top
Btn_Y: e.KEY_X, # Y Button, right down
Btn_U: e.KEY_UP, # Up Button, left top
Btn_L: e.KEY_LEFT, # Left Button, left left
Btn_R: e.KEY_RIGHT, # Right Button, left right
Btn_D: e.KEY_DOWN, # Down Button, left down
Btn_LT: e.KEY_Q, # Left Trigger
Btn_RT: e.KEY_W, # Right Trigger
}
os.system("sudo modprobe uinput")
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
gpio.setup(buttonList, gpio.IN, pull_up_down=gpio.PUD_UP)
try:
    ui = UInput({e.EV_KEY: list(keyList.values())}, name="retrogame", bustype=e.BUS_USB)
except uinput.UInputError as err:  # keep 'e' bound to evdev.ecodes
    sys.stdout.write(str(err))
    sys.stdout.write("Have you tried running as root? sudo {}".format(sys.argv[0]))
    sys.exit(0)
def log(msg):
sys.stdout.write(str(datetime.now()))
sys.stdout.write(": ")
sys.stdout.write(msg)
sys.stdout.write("\n")
sys.stdout.flush()
def handle_button(pin):
key = keyList[pin]
time.sleep(Debounce)
if pin >= 1000:
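        # analog_states is not defined in this script; the GPIO pins used here
        # are all well below 1000, so this branch never runs as-is.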
state = analog_states[pin-1000]
else:
state = 0 if gpio.input(pin) else 1
ui.write(e.EV_KEY, key, state)
ui.syn()
if DEBUG:
log("Pin: {}, KeyCode: {}, Event: {}".format(pin, key, 'press' if state else 'release'))
for button in buttonList:
gpio.add_event_detect(button, gpio.BOTH, callback=handle_button, bouncetime=1)
while True:
try:
time.sleep(0.01)
except KeyboardInterrupt:
print("Stopping")
sys.exit(0)
'''
buttonList = [
[4, uinput.KEY_VOLUMEUP], # Right slider
[27, uinput.KEY_VOLUMEDOWN], # Left slider
[17, uinput.KEY_MUTE], # Click slider
[5, uinput.KEY_LEFTALT], # Alt key/turbo button, center Top left
[26, uinput.KEY_LEFTCTRL], # Mode/clear button , center top right
[20, uinput.KEY_ENTER], # Start Button, center bottom left
[6, uinput.KEY_SPACE], # Select Button, center bottom right
[1, uinput.KEY_A], # A Button, right bottom
[16, uinput.KEY_B], # B Button, right right
[24, uinput.KEY_X], # X Button, right top
[12, uinput.KEY_Y], # Y Button, right down
[11, uinput.KEY_UP], # Up Button, left top
[9, uinput.KEY_LEFT], # Left Button, left left
[0, uinput.KEY_RIGHT], # Right Button, left right
[13, uinput.KEY_DOWN], # Down Button, left down
[10, uinput.KEY_L], # Left Trigger
[23, uinput.KEY_R], # Right Trigger
]
# Decontruct the lists that are indexed the same for recall
eventList = [item[1] for item in buttonList]
pinList = [item[0] for item in buttonList]
device = uinput.Device(eventList)
buttons = []
for pin in pinList:
# create a button with 5ms bounce timer
buttons.append(Button(pin, bounce_time=0.005))
time.sleep(0.125)
print('Ready')
while(1):
for idx in range(0,len(pinList)):
if(buttons[idx].is_pressed):
#device.emit_click(eventList[idx])
device.emit(eventList[idx], 1)
else:
device.emit(eventList[idx], 0)
time.sleep(0.0165)
'''
|
# -*- coding: utf-8 -*-
from sqlalchemy.exc import IntegrityError
class MetadataManagerReadError(Exception):
pass
class InvalidMimeTypeError(Exception):
def __init__(self, mimetype):
msg = "Invalid mimetype '%s'" % str(mimetype)
super(InvalidMimeTypeError, self).__init__(msg)
class NoConfigFoundError(Exception):
def __init__(self, *args, **kwargs):
msg = ('No configuration file found. Please define one:\n'
'\t* config/local.py\n'
'\t* Set the environment variable $SHIVA_CONFIG pointing to a '
'config file.\n'
'\t* $XDG_CONFIG_HOME/shiva/config.py which defaults to \n'
'\t $HOME/.config/shiva/config.py')
super(NoConfigFoundError, self).__init__(msg)
class NoUploadPathConfigError(Exception):
def __init__(self):
msg = "No UPLOAD_PATH config defined."
super(NoUploadPathConfigError, self).__init__(msg)
class InvalidFileTypeError(Exception):
pass
class ObjectExistsError(Exception):
pass
|
import os
import aiohttp
import time
import re
import logging
import glob
import random
from contextlib import asynccontextmanager, contextmanager
from PIL import Image, ImageDraw, ImageFont
from athena_bot import utils
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
def rounded_rectangle(self: ImageDraw, xy, corner_radius, fill=None, outline=None):
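    # Draw a rounded rectangle on an ImageDraw surface: four quarter-circle pie
    # slices form the corners and two overlapping rectangles fill the body.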
upper_left_point = xy[0]
bottom_right_point = xy[1]
self.pieslice([upper_left_point, (upper_left_point[0] + corner_radius * 2, upper_left_point[1] + corner_radius * 2)],
180, 270, fill=fill, outline=outline
)
self.pieslice([(bottom_right_point[0] - corner_radius * 2, bottom_right_point[1] - corner_radius * 2), bottom_right_point],
0, 90, fill=fill, outline=outline
)
self.pieslice([(upper_left_point[0], bottom_right_point[1] - corner_radius * 2), (upper_left_point[0] + corner_radius * 2, bottom_right_point[1])],
90, 180, fill=fill, outline=outline
)
self.pieslice([(bottom_right_point[0] - corner_radius * 2, upper_left_point[1]), (bottom_right_point[0], upper_left_point[1] + corner_radius * 2)],
270, 360, fill=fill, outline=outline
)
self.rectangle([
(upper_left_point[0], upper_left_point[1] + corner_radius),
(bottom_right_point[0], bottom_right_point[1] - corner_radius)],
fill=fill, outline=fill
)
self.rectangle([
(upper_left_point[0] + corner_radius, upper_left_point[1]),
(bottom_right_point[0] - corner_radius, bottom_right_point[1])],
fill=fill, outline=fill
)
#self.line([(upper_left_point[0] + corner_radius, upper_left_point[1]), (bottom_right_point[0] - corner_radius, upper_left_point[1])], fill=outline)
#self.line([(upper_left_point[0] + corner_radius, bottom_right_point[1]), (bottom_right_point[0] - corner_radius, bottom_right_point[1])], fill=outline)
#self.line([(upper_left_point[0], upper_left_point[1] + corner_radius), (upper_left_point[0], bottom_right_point[1] - corner_radius)], fill=outline)
#self.line([(bottom_right_point[0], upper_left_point[1] + corner_radius), (bottom_right_point[0], bottom_right_point[1] - corner_radius)], fill=outline)
class Overwatch:
BASE_URL='https://ow-api.com/v1/stats'
CACHE_FILE=os.environ.get('OVERWATCH_DATA', './overwatch.p')
IMG_CACHE=os.environ.get('OVERWATCH_IMAGES', './cache')
    CACHE_EXPIRE=int(os.environ.get('OVERWATCH_EXPIRE', 60))
HERO_IMAGES='./assets/images/'
FONT_FILES='./assets/fonts/'
BG_IMAGES='./assets/backgrounds/'
def __init__(self):
logger.info('Initializing Overwatch')
self.cache = utils.read_datafile(self.CACHE_FILE)
self.font_cache = {}
async def get_stats(self, battletag, refresh=False):
if not refresh:
cached_bt = self.cache.get(battletag)
if cached_bt and self.__check_expiration(cached_bt):
logger.debug(f'cache hit on {battletag} stats')
return cached_bt
async with aiohttp.ClientSession() as client:
logger.debug(f'getting fresh {battletag} stats')
try:
async with client.get(f'{self.BASE_URL}/pc/us/{battletag.replace("#","-")}/complete') as res:
assert res.status == 200
fresh_data = await res.json()
fresh_data.update({'accessed': datetime.now()})
self.cache.update({battletag: fresh_data})
utils.write_datafile(self.CACHE_FILE, self.cache)
return fresh_data
except AssertionError as e:
logger.info(f'error reading stats for {battletag}')
return False
async def full_refresh(self, forced=False):
for tag in self.cache.keys():
await self.get_stats(tag, refresh=forced)
def find_best(self, role):
role = 'support' if role in ['healer', 'support'] else role
role = 'damage' if role in ['dps', 'damage dealer'] else role
role = 'tank' if role in ['tank', 'shield'] else role
role_list = []
for battletag, data in self.cache.items():
ratings = data.get('ratings')
if not ratings:
continue
for rating in ratings:
if rating['role'] == role:
role_list.append((battletag, rating['level']))
break
if not role_list:
return None
role_list.sort(key=lambda t: -t[1])
if len(role_list) == 1:
return role_list[0][0]
        # Return every battletag tied for the highest rating in this role;
        # a clear leader comes back as a one-element list.
        top_level = role_list[0][1]
        return [tag for tag, level in role_list if level == top_level]
def get_font(self, font_name, size):
try:
font = self.font_cache[(font_name, size)]
except KeyError as e:
logger.debug(f'font {font_name} not found, loading to cache')
font = ImageFont.truetype(os.path.join(self.FONT_FILES, font_name), size)
self.font_cache.update({(font_name, size): font})
return font
def __check_expiration(self, cached_data):
now = datetime.now()
accessed = cached_data.get('accessed', False)
return accessed and now < accessed + timedelta(minutes=self.CACHE_EXPIRE)
def top_hero(self, data):
if data['private']:
return None
games = [(k, int(v['timePlayed'].replace(':',''))) for k, v in data['quickPlayStats']['topHeroes'].items()]
top = sorted(games, key=lambda x: x[1], reverse=True) #HAAAAA
return top[0][0]
async def __cached_image(self, url, key=None, expire=None, ext='png') -> Image:
if not key:
match = re.search(r'([a-zA-Z0-9-]*\.'+ext+')$', url)
key = match.groups()[0]
path = os.path.join(self.IMG_CACHE, key + '.' + ext)
try:
if expire:
file_time = os.path.getmtime(path)
is_expired = ((time.time() - file_time) / 3600 > 24*expire)
if is_expired:
raise FileNotFoundError()
return Image.open(path)
except FileNotFoundError as e:
logger.debug(f'image {key} not found, loading to cache')
if not url:
return
async with aiohttp.ClientSession() as client:
try:
async with client.get(url) as res:
assert res.status == 200
image_to_cache = await res.read()
with open(path, 'wb') as image_file:
image_file.write(image_to_cache)
return Image.open(path)
except AssertionError as e:
logger.debug(f'error downloading image {key}')
return None
return None
def line_size(self, line, font:ImageFont):
bbox = font.getmask(line).getbbox()
return (bbox[2], bbox[3])
@asynccontextmanager
async def get_cached_image(self, url, key=None, expire=None, ext='png'):
image = await self.__cached_image(url, key, expire, ext)
try:
yield image
finally:
if image:
image.close()
@asynccontextmanager
async def rating_badge(self, rating):
im = Image.new('RGBA', (80, 25), 'rgba(0,0,0,0)')
async with self.get_cached_image(rating['roleIcon'], key=rating['role'], expire=30) as role_icon, self.get_cached_image(rating['rankIcon'], expire=30) as rank_icon:
font = self.get_font('RobotoCondensed-Bold.ttf', 15)
im.alpha_composite(role_icon.resize((15, 15)), (4, 6))
d = ImageDraw.Draw(im)
d.text((24, 5), str(rating['level']), font=font, fill='white')
im.alpha_composite(rank_icon.resize((20, 20)), (26 + self.line_size(str(rating['level']), font)[0], 4))
try:
yield im
finally:
im.close()
@contextmanager
def level_badge(self, level, prestige):
BRONZE = ('white', '#9c4b30')
SILVER = ('#2e2d46', '#c0c0c0')
GOLD = ('#692e00', '#ffd700')
PLATINUM = SILVER
DIAMOND = ('#2e2d46', '#d1cded')
HEIGHT = 25
font = self.get_font('RobotoCondensed-Bold.ttf', 20)
symbol = self.get_font('Symbola.otf', 20)
mod_prestige = (prestige % 6)
level_width = self.line_size(str(level), font)[0]
color = BRONZE
if prestige > 5:
color = SILVER
if prestige > 11:
color = GOLD
if prestige > 17:
color = PLATINUM
if prestige > 23:
color = DIAMOND
if prestige > 29:
mod_prestige = 5
if mod_prestige > 0:
width = level_width + self.line_size('★'*mod_prestige, symbol)[0] + 15
else:
width = level_width + 10
im = Image.new('RGBA', (width, HEIGHT), 'rgba(0,0,0,0)')
d = ImageDraw.Draw(im)
f_color, bg_color = color
rounded_rectangle(d, ((0, 0), (width-2, HEIGHT-2)), 7, fill=bg_color)
d.text((5, 0), str(level), font=font, fill=f_color)
if mod_prestige > 0:
rounded_rectangle(d, ((level_width+10, 1), (width-3, HEIGHT-3)), 7, fill='white')
d.rectangle(((level_width+10, 1), (width-10, HEIGHT-3)), fill='white')
star_color = GOLD[1] if prestige > 16 else f_color if prestige > 5 else bg_color
d.text((self.line_size(str(level), font)[0] + 11, -1), '★'*mod_prestige, font=symbol, fill=star_color)
try:
yield im
finally:
im.close()
def total_stats(self, data, key):
try:
stats = data[key]['careerStats']['allHeroes']['game']
games = stats['gamesPlayed']
try:
hours, minutes, secs = re.match(r'(\d+):(\d{2}):(\d{2})', stats['timePlayed']).groups()
time = int(hours) + (int(minutes) / 60) + (int(secs) / 3600)
except AttributeError as e:
minutes, secs = re.match(r'(\d{2}):(\d{2})', stats['timePlayed']).groups()
time = (int(minutes) / 60) + (int(secs) / 3600)
except KeyError as e:
games = 0
time = 0
return (games, time)
@asynccontextmanager
async def top_bar_image(self, data, width):
im = Image.new('RGBA', (width, 70), (0,0,0,180))
# draws the user icon
icon_key = data['name'].replace('#', '-') + 'icon'
async with self.get_cached_image(data['icon'], icon_key, 1) as icon_img:
icon_img = icon_img.resize((50, 50))
im.paste(icon_img, (10, 10))
# draws name and number
d = ImageDraw.Draw(im)
name_fnt = self.get_font('RobotoCondensed-BoldItalic.ttf', 30)
number_fnt = self.get_font('RobotoCondensed-Italic.ttf', 15)
name, number = data['name'].split('#')
d.text((70,10), name, font=name_fnt, fill=(255,255,255,255))
d.text((68,40), '#'+number, font=number_fnt, fill=(255,255,255,128))
# draws level badge
with self.level_badge(data['level'], data['prestige']) as lv_img:
im.alpha_composite(lv_img, (self.line_size(name, name_fnt)[0]+75, 15))
try:
yield im
finally:
im.close()
@contextmanager
def mid_bar_image(self, data, width):
if data['private']:
bar_color = (128,0,0, 180)
bar_text = 'This profile is private'
else:
bar_color = (255,255,255,180)
g_comp, t_comp = self.total_stats(data, 'competitiveStats')
g_qp, t_qp = self.total_stats(data, 'quickPlayStats')
bar_text = f'{g_comp + g_qp} Games / {int(t_comp + t_qp)} Hrs'
im = Image.new('RGBA', (width, 24), bar_color)
font = self.get_font('RobotoCondensed-Bold.ttf', 15)
d = ImageDraw.Draw(im)
d.text((10, 4), bar_text, font=font, fill='black')
try:
yield im
finally:
im.close()
@contextmanager
def background_img(self, width):
images = glob.glob(os.path.join(self.BG_IMAGES, '*.jpg'))
image_path = random.choice(images)
im = Image.open(image_path)
im.thumbnail((width, width))
im = im.convert('RGBA')
try:
yield im
finally:
im.close()
@asynccontextmanager
async def bottom_bar_img(self, data, width):
im = Image.new('RGBA', (width, 25), (0,0,0,180))
x = 0
for rating in data.get('ratings'):
async with self.rating_badge(rating) as rim:
im.alpha_composite(rim, (x, 0))
x += rim.width
try:
yield im
finally:
im.close()
@contextmanager
def hero_image(self, hero):
im = Image.open(os.path.join(self.HERO_IMAGES, hero + '.png'))
im = im.resize((10 * im.width // 31, 10 * im.height // 31), Image.HAMMING)
im = im.convert('RGBA')
try:
yield im
finally:
im.close()
async def build_profile_image(self, data):
width = 500
im = Image.new('RGBA', (width, width), (0, 0, 0, 0))
actual_height = 0
with self.background_img(width) as bg_img:
im.paste(bg_img, (0, -bg_img.height//4))
async with self.top_bar_image(data, width) as top_img:
im.alpha_composite(top_img, (0, 0))
actual_height += top_img.height
with self.mid_bar_image(data, width) as mid_img:
im.alpha_composite(mid_img, (0, top_img.height))
actual_height += mid_img.height
if data.get('ratings'):
async with self.bottom_bar_img(data, width) as bot_img:
im.alpha_composite(bot_img, (0, top_img.height + mid_img.height))
actual_height += bot_img.height
im = im.crop(((0,0, width, actual_height)))
top_hero = self.top_hero(data)
if top_hero:
with self.hero_image(top_hero) as hero_image:
height = max(hero_image.height, actual_height)
final_image = Image.new('RGBA', (width, height), (0,0,0,0))
final_image.alpha_composite(im, (0, height - im.height))
final_image.alpha_composite(hero_image, (final_image.width - hero_image.width + 50, 0))
im.close()
im = final_image
im.save(os.path.join(self.IMG_CACHE, f'badge-{data["name"].replace("#", "-")}.png'))
im.close()
async def get_profile_path(self, battletag, refresh=False):
path = os.path.join(self.IMG_CACHE, f'badge-{battletag.replace("#", "-")}.png')
if not refresh:
try:
file_time = os.path.getmtime(path)
is_expired = ((time.time() - file_time) / 3600 > (self.CACHE_EXPIRE / 60))
if is_expired:
raise FileNotFoundError()
except FileNotFoundError as e:
                logger.debug(f'failed to find badge for {battletag} in cache')
return await self.get_profile_path(battletag, refresh=True)
data = await self.get_stats(battletag, refresh)
await self.build_profile_image(data)
return path
|
# MAKE SURE TO RUN THESE TESTS USING PYTHON3, NOT PYTHON2!
# AS ALWAYS, DO NOT CHANGE THE TESTS!
import subprocess
import sys
import unittest
def runcmd(cmd, input_text=None):
splitcmd=cmd.split()
return subprocess.run(splitcmd, stdout=subprocess.PIPE, input=input_text, check=True)
class HelloTests(unittest.TestCase):
def testOutput(self):
runcmd('clang -o hello hello.c')
output = runcmd('./hello').stdout
self.assertEqual(output, b'I AM A C PROGRAMMING LEGEND!!!\n')
class PrintingTests(unittest.TestCase):
def testOutput(self):
runcmd('clang -o printing printing.c')
output = runcmd('./printing').stdout
# See if output contains correct answer somewhere
self.assertNotEqual(-1, output.find(b"72"))
class TemperatureTestsBasic(unittest.TestCase):
def testTemperature(self):
runcmd('clang -o temperature temperature.c')
#output = subprocess.run('./temperature', stdout=subprocess.PIPE, input=b"97", check=True)
output = runcmd('./temperature', input_text=b"97").stdout
# See if output contains correct answer somewhere
self.assertNotEqual(-1, output.find(b"36.1"))
class TemperatureTestsAdvanced(unittest.TestCase):
def testInvalidTemperature(self):
runcmd('clang -o temperature temperature.c')
output = runcmd('./temperature', input_text=b"-500").stdout
# See if output contains correct output somewhere
self.assertNotEqual(-1, output.find(b"Invalid temperature!"))
unittest.main(verbosity=2)
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import time
from collections import namedtuple
import boto3
from fabric import Connection
from retrying import retry
from utils import random_alphanumeric
SnapshotConfig = namedtuple("SnapshotConfig", ["ssh_key", "key_name", "vpc_id", "head_node_subnet_id"])
class EBSSnapshotsFactory:
"""Manage creation and destruction of volume snapshots."""
def __init__(self):
self.config = None
self.instance = None
self.volume = None
self.snapshot = None
self.security_group_id = None
self.ec2 = None
self.boto_client = None
def create_snapshot(self, request, subnet_id, region):
"""
Create a snapshot in a given region.
:param request: The current request
:param subnet_id: The subnet id where to get the snapshot
:param region: The region where to get the snapshot
"""
# Only one snapshot creation per factory allowed
if self.snapshot:
raise Exception("Snapshot already created")
self.ec2 = boto3.resource("ec2", region_name=region)
self.boto_client = boto3.client("ec2", region_name=region)
snapshot_config = SnapshotConfig(
request.config.getoption("key_path"),
request.config.getoption("key_name"),
self.ec2.Subnet(subnet_id).vpc_id,
subnet_id,
)
self.snapshot = self._create_snapshot(region, snapshot_config)
return self.snapshot.id
def create_existing_volume(self, request, subnet_id, region):
"""
Create a volume in a given region.
:param request: The current request
:param subnet_id: The subnet id where to get the snapshot
:param region: The region where to get the snapshot
"""
# Only one volume creation per factory allowed
if self.volume:
raise Exception("Volume already created")
self.ec2 = boto3.resource("ec2", region_name=region)
self.boto_client = boto3.client("ec2", region_name=region)
volume_config = SnapshotConfig(
request.config.getoption("key_path"),
request.config.getoption("key_name"),
self.ec2.Subnet(subnet_id).vpc_id,
subnet_id,
)
self._create_volume_process(region, volume_config)
return self.volume.id
def _create_volume_process(self, region, snapshot_config):
self.config = snapshot_config
ami_id = self._get_amazonlinux_ami()
self.security_group_id = self._get_security_group_id()
subnet = self.ec2.Subnet(self.config.head_node_subnet_id)
# Create a new volume and attach to the instance
self.volume = self._create_volume(subnet)
self.instance = self._launch_instance(ami_id, subnet)
self._attach_volume()
# Open ssh connection
self.ssh_conn = self._open_ssh_connection()
# Partitions the disk with a gpt table and 1 single partition inside
self._format_volume(self.ssh_conn)
# Stops the instance before taking the snapshot
self._release_instance()
def _create_snapshot(self, region, snapshot_config):
self._create_volume_process(region, snapshot_config)
self.snapshot = self._create_volume_snapshot()
return self.snapshot
def _create_volume_snapshot(self):
logging.info("creating snapshot...")
snapshot = self.ec2.create_snapshot(Description="parallelcluster-test-snapshot", VolumeId=self.volume.id)
while snapshot.state == "pending":
time.sleep(10)
snapshot = self.ec2.Snapshot(snapshot.id)
logging.info("Snapshot ready: %s" % snapshot.id)
return snapshot
def _format_volume(self, ssh_conn):
logging.info("Partitioning device...")
ssh_conn.run("sudo sh -c 'echo -e \"g\nn\np\n1\n\n\nw\" | fdisk /dev/sdf'", warn=True, pty=False, hide=False)
# Finds out the device name of the volume
logging.info("Finding device name...")
device_name = ssh_conn.run("readlink -f /dev/sdf").stdout.strip()
# formats the 1st partition of disk
logging.info("Formatting 1st partition...")
ssh_conn.run("sudo sh -c 'mkfs.ext4 {}1'".format(device_name))
logging.info("Mounting partition...")
ssh_conn.run("sudo mkdir /mnt/tmp")
ssh_conn.run("sudo mount {}1 /mnt/tmp".format(device_name))
logging.info("Writing test data...")
ssh_conn.run("echo 'hello world' | sudo tee -a /mnt/tmp/test.txt")
logging.info("Device ready")
def _open_ssh_connection(self):
tries = 5
logging.info("Connecting to instance %s " % self.instance.public_ip_address)
logging.info("ssh_key: %s " % self.config.ssh_key)
ssh_conn = None
while tries > 0:
try:
ssh_conn = Connection(
host=self.instance.public_ip_address,
user="ec2-user",
forward_agent=False,
connect_kwargs={"key_filename": [self.config.ssh_key]},
)
ssh_conn.open()
tries = 0
except BaseException:
logging.info("SSH connection error - retrying...")
tries -= 1
time.sleep(20)
if (ssh_conn is None) or (not ssh_conn.is_connected):
raise ConnectionError()
return ssh_conn
def _attach_volume(self):
result = self.boto_client.attach_volume(VolumeId=self.volume.id, InstanceId=self.instance.id, Device="/dev/sdf")
logging.info("Attach Volume Result: ", result)
def _create_volume(self, subnet):
vol = self.ec2.create_volume(
Size=10,
Encrypted=False,
AvailabilityZone=subnet.availability_zone,
TagSpecifications=[
{"ResourceType": "volume", "Tags": [{"Key": "name", "Value": "parallel-cluster-test-volume"}]}
],
)
logging.info("Volume Id: %s" % vol.id)
# We can check if the volume is now ready and available:
logging.info("Waiting for the volume to be ready...")
while vol.state == "creating":
vol = self.ec2.Volume(vol.id)
time.sleep(2)
logging.info("Volume ready")
return vol
def _get_security_group_id(self):
security_group_id = self.boto_client.create_security_group(
Description="security group for snapshot instance node",
GroupName="snapshot-" + random_alphanumeric(),
VpcId=self.config.vpc_id,
)["GroupId"]
self.boto_client.authorize_security_group_ingress(
GroupId=security_group_id,
IpPermissions=[{"IpProtocol": "tcp", "FromPort": 22, "ToPort": 22, "IpRanges": [{"CidrIp": "0.0.0.0/0"}]}],
)
return security_group_id
def _launch_instance(self, ami_id, subnet):
instance = self.ec2.create_instances(
ImageId=ami_id,
KeyName=self.config.key_name,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
NetworkInterfaces=[
{
"SubnetId": subnet.id,
"DeviceIndex": 0,
"AssociatePublicIpAddress": True,
"Groups": [self.security_group_id],
}
],
TagSpecifications=[
{"ResourceType": "instance", "Tags": [{"Key": "Name", "Value": "pcluster-snapshot-instance"}]}
],
)[0]
logging.info("Waiting for instance to be running...")
while instance.state["Name"] == "pending":
time.sleep(10)
instance = self.ec2.Instance(instance.id)
logging.info("Instance state: %s" % instance.state)
logging.info("Public dns: %s" % instance.public_dns_name)
return instance
def _get_amazonlinux_ami(self):
# Finds most recent alinux ami in region
response = self.boto_client.describe_images(
Owners=["amazon"],
Filters=[
{"Name": "name", "Values": ["amzn-ami-hvm-*"]},
{"Name": "description", "Values": ["Amazon Linux AMI*"]},
{"Name": "architecture", "Values": ["x86_64"]},
{"Name": "root-device-type", "Values": ["ebs"]},
{"Name": "state", "Values": ["available"]},
],
)
amis = sorted(response["Images"], key=lambda x: x["CreationDate"], reverse=True)
return amis[0]["ImageId"]
def release_all(self):
"""Release all resources"""
self._release_instance()
self._release_volume()
self._release_snapshot()
self._release_security_group()
@retry(stop_max_attempt_number=5, wait_fixed=5000)
def _release_snapshot(self):
if self.snapshot:
logging.info("Deleting snapshot %s" % self.snapshot.id)
self.snapshot.delete()
@retry(stop_max_attempt_number=5, wait_fixed=5000)
def _release_instance(self):
if self.instance:
self.instance.terminate()
logging.info("Waiting for instance to be terminated...")
while self.instance.state["Name"] != "terminated":
time.sleep(10)
self.instance = self.ec2.Instance(self.instance.id)
logging.info("Instance terminated")
self.instance = None
@retry(stop_max_attempt_number=5, wait_fixed=5000)
def _release_volume(self):
if self.volume:
logging.info("Deleting volume %s" % self.volume.id)
self.volume.delete()
self.volume = None
def _release_security_group(self):
if self.security_group_id:
logging.info("Deleting security group %s" % self.security_group_id)
self.boto_client.delete_security_group(GroupId=self.security_group_id)
self.security_group_id = None
|
'''Autonomous driving convolutional neural network example from p. 64 of the book.
Code based on https://github.com/OSSDC/AutonomousDrivingCookbook/tree/master/AirSimE2EDeepLearning'''
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Input, concatenate
from keras.optimizers import Nadam
from keras.preprocessing import image
from Generator import DriveDataGenerator
'''Runs p. 64 convolutional neural network.'''
def main():
    # train_dataset, eval_dataset and batch_size are assumed to be defined
    # elsewhere (e.g. loaded from the cookbook's preprocessed .h5 files).
    data_generator = DriveDataGenerator(rescale=1./255., horizontal_flip=True, brighten_range=0.4)
train_generator = data_generator.flow\
(train_dataset['image'], train_dataset['previous_state'], train_dataset['label'], batch_size=batch_size, zero_drop_percentage=0.95, roi=[76,135,0,255])
eval_generator = data_generator.flow\
(eval_dataset['image'], eval_dataset['previous_state'], eval_dataset['label'], batch_size=batch_size, zero_drop_percentage=0.95, roi=[76,135,0,255])
[sample_batch_train_data, sample_batch_test_data] = next(train_generator)
# above ross add
image_input_shape = sample_batch_train_data[0].shape[1:]
state_input_shape = sample_batch_train_data[1].shape[1:]
activation = 'relu'
#Create the convolutional stacks
pic_input = Input(shape=image_input_shape)
img_stack = Conv2D(16, (3, 3), name="convolution0", padding='same', activation=activation)(pic_input)
img_stack = MaxPooling2D(pool_size=(2,2))(img_stack)
img_stack = Conv2D(32, (3, 3), activation=activation, padding='same', name='convolution1')(img_stack)
img_stack = MaxPooling2D(pool_size=(2, 2))(img_stack)
img_stack = Conv2D(32, (3, 3), activation=activation, padding='same', name='convolution2')(img_stack)
img_stack = MaxPooling2D(pool_size=(2, 2))(img_stack)
img_stack = Flatten()(img_stack)
img_stack = Dropout(0.2)(img_stack)
#Inject the state input
state_input = Input(shape=state_input_shape)
merged = concatenate([img_stack, state_input])
# Add a few dense layers to finish the model
merged = Dense(64, activation=activation, name='dense0')(merged)
merged = Dropout(0.2)(merged)
merged = Dense(10, activation=activation, name='dense2')(merged)
merged = Dropout(0.2)(merged)
merged = Dense(1, name='output')(merged)
adam = Nadam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model = Model(inputs=[pic_input, state_input], outputs=merged)
model.compile(optimizer=adam, loss='mse')
main()
|
import unittest
from shiny_client import category
import requests
from base_test import ApiResourceTest
from shiny_client import base_client
class TestCategoryListCommand(unittest.TestCase):
RESOURCE_URL = "https://api.zalando.com/categories"
def test_instantiate(self):
base_client.PluginCommandBase.register(category.CategoryListCommand)
c = category.CategoryListCommand()
self.assertTrue(issubclass(category.CategoryListCommand, base_client.PluginCommandBase))
self.assertTrue(isinstance(category.CategoryListCommand(), base_client.PluginCommandBase))
def test_get_list(self):
c = self._get_command()
parsed_args = self._get_mocked_args()
c.perform(parsed_args)
def _get_command(self):
c = category.CategoryListCommand()
c.lang = ApiResourceTest.get_api_lang()
c.endpoint = TestCategoryListCommand.RESOURCE_URL
c.is_insecure = ApiResourceTest.is_insecure()
c.request_timeout = ApiResourceTest.get_timeout()
return c
def _get_mocked_args(self):
parsed_args = lambda: None
parsed_args.fields = ['name', 'key', 'type', 'parentKey']
parsed_args.is_machine_readable = True
return parsed_args
def test_show_schema(self):
c = category.CategoryListCommand()
# c.show_schema()
class TestCategoryClient(ApiResourceTest, unittest.TestCase):
RESOURCE_URL = "https://api.zalando.com/categories"
def _assert_resouce_list_test(self, o):
self.assertIsNotNone(o.name)
self.assertIsNotNone(o.key)
self.assertIsNotNone(o.targetGroup)
self.assertIsNotNone(o.childKeys)
self.assertTrue(o.parentKey is None or isinstance(o.parentKey, str))
def _create_resource_mgmt(self):
return category.CategoryManager()
def _get_resource_endpoint(self):
return TestCategoryClient.RESOURCE_URL
def test_find_by_name_and_outlet(self):
mgmt = self._get_resource_mgmt()
query = {"name": "s"}
stats = mgmt.get_stats(query)
is_found = 0
for c in mgmt.find_by(query):
is_found = is_found + 1
self.assertEquals(is_found, stats["totalElements"])
def test_get_one_category(self):
"""
"""
mgmt = self._get_resource_mgmt()
expected_category = next(mgmt.list_page(1))
expected_category_key = expected_category.key
category = mgmt.get(expected_category_key)
self.assertIsNotNone(category)
self.assertEquals(expected_category_key, category.key)
def test_get_not_existing_category(self):
"""
"""
mgmt = self._get_resource_mgmt()
not_existing_key = "test-test-test-test"
with self.assertRaises(requests.HTTPError):
category = mgmt.get(not_existing_key)
def test_get_stats(self):
mgmt = self._get_resource_mgmt()
result = mgmt.get_stats()
self.assertTrue("totalElements" in result)
|
import json
import py.path
import pytest
import apypie
@pytest.fixture
def fixture_dir():
return py.path.local(__file__).realpath() / '..' / 'fixtures'
@pytest.fixture
def apidoc_cache_dir(fixture_dir, tmpdir):
fixture_dir.join('dummy.json').copy(tmpdir / 'default.json')
fixture_dir.join('dummy.json').copy(tmpdir / 'default.tlh.json')
return tmpdir
@pytest.fixture
def api(fixture_dir, requests_mock, tmpdir):
with fixture_dir.join('dummy.json').open() as read_file:
data = json.load(read_file)
requests_mock.get('https://api.example.com/apidoc/v1.json', json=data)
api = apypie.Api(uri='https://api.example.com', apidoc_cache_dir=tmpdir.strpath)
# explicitly trigger loading of the apidoc
# our tests often mock Api.http_call, which breaks apidoc loading
api.apidoc
return api
@pytest.fixture
def resource(api):
return api.resource('users')
@pytest.fixture
def action(resource):
return resource.action('show')
@pytest.fixture
def foreman_api(fixture_dir, requests_mock, tmpdir):
with fixture_dir.join('foreman.json').open() as read_file:
data = json.load(read_file)
requests_mock.get('https://foreman.example.com/apidoc/v2.json', json=data)
return apypie.Api(uri='https://foreman.example.com', api_version=2, apidoc_cache_dir=tmpdir.strpath)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: Guannan Ma (@mythmgn),
"""
common function module for cup.net.async
"""
import socket
import struct
from cup.util import misc
__all__ = [
'ip_port2connaddr', 'add_stub2connaddr', 'add_future2connaddr',
'get_ip_and_port_connaddr', 'getip_connaddr', 'getport_connaddr',
'getstub_connaddr', 'getfuture_connaddr'
]
def ip_port2connaddr(peer):
"""
    connaddr is a 96bit int
32 - 16 - 16 - 32
ip - port - stub - future
:param peer:
(ipaddr, port)
:return:
return a connaddr
"""
misc.check_type(peer, tuple)
ipaddr, port = peer
misc.check_type(ipaddr, str)
packed = socket.inet_aton(ipaddr)
return (struct.unpack("!L", packed)[0] << 64) | (port << 48)
def add_stub2connaddr(pack, stub):
"""
add stub into connaddr
"""
return pack | (stub << 32)
def add_future2connaddr(pack, future):
"""
add future into connaddr
"""
return pack | future
def get_ip_and_port_connaddr(pack):
"""
get (ip, port) from connaddr
"""
ipaddr = getip_connaddr(pack)
port = getport_connaddr(pack)
return (ipaddr, port)
def getip_connaddr(pack):
"""
get ip from connaddr
"""
return socket.inet_ntoa(struct.pack('!L', pack >> 64))
def getport_connaddr(pack):
"""
get port from connaddr
"""
return (pack >> 48) & 0xffff
def getstub_connaddr(pack):
"""
get stub from connaddr
"""
return (pack >> 32) & 0xffff
def getfuture_connaddr(pack):
"""
get future from conaddr
"""
return (pack) & 0xffff
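# A small, hedged usage sketch (not part of the original module): pack a peer
# address plus made-up stub/future values and read every field back out.
if __name__ == '__main__':
    pack = ip_port2connaddr(('10.0.0.1', 8080))
    pack = add_stub2connaddr(pack, 12)
    pack = add_future2connaddr(pack, 34)
    assert get_ip_and_port_connaddr(pack) == ('10.0.0.1', 8080)
    assert getstub_connaddr(pack) == 12
    assert getfuture_connaddr(pack) == 34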
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
from django.db import models
from auth1.models import *
from api.managers import FeedbackManager
# Create your models here.
class Feedback(models.Model):
driver = models.ForeignKey(User, on_delete=models.CASCADE, related_name='driver_feedback')
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
content = models.TextField(max_length=1000, default='')
objects = FeedbackManager()
class Meta:
verbose_name = 'Feedback'
verbose_name_plural = 'Feedbacks'
def __str__(self):
return f'{self.id}: {self.driver}'
|
import os
from pathlib import Path # noqa
here = Path(__file__).resolve(strict=True).parent.parent
__all__ = ["__version__"]
with open(os.path.join(os.path.dirname(here), '../VERSION')) as version_file:
__version__ = version_file.read().strip()
|
import numba
import numpy as np
from collections import Counter, defaultdict
class hogImg():
"""example usage:
hog = hogImg(arr=np.random.randint(1, 101, size=(19,19)))
f = hog.do_work()"""
def __init__(self, arr, oriens=6, ppc=(3, 3), cpc=(6, 6)):
self.arr = arr
self.oriens = oriens
self.ppc = ppc
self.cpc = cpc
if int(self.arr.shape[0])/self.ppc[0]-self.cpc[0]+1 < 0:
raise(ValueError("wrong dimensions - ensure array is larger or change cpc to a smaller number!"))
def __repr__(self):
return f"hogImg class with orientation: {self.oriens} ppc: {self.ppc} cpc: {self.cpc}"
def __str__(self):
return f"input array is {self.arr.shape} with orientation {self.oriens} and ppc {self.ppc} and cpc {self.cpc}"
def hog_normalize_block(self, blk, eps=1e-6):
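        # L2-style block normalisation: divide by the block's Euclidean norm,
        # with eps guarding against division by zero for all-zero blocks.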
norma = blk/np.sqrt(np.sum(blk ** 2) + eps ** 2)
return norma
def hog_gradients(self, arr):
g_row = np.zeros(arr.shape, dtype=arr.dtype)
g_row[1:-1, ...] = arr[2:, ...] - arr[:-2, ...]
g_col = np.zeros(arr.shape, dtype=arr.dtype)
g_col[..., 1:-1] = arr[..., 2:] - arr[..., :-2]
return g_row, g_col
    @numba.jit
    def make_hog_histogram_numba(self, arr):
        # Same 20-degree binning as make_hog_histogram, written as an explicit
        # loop; counts per bin are normalised by the sum of the angle values.
        bins = list(range(0, 380, 20))
        hist = {}
        sum_values = sum(arr.ravel())
        for val in arr.ravel():
            for b in range(len(bins) - 1):
                if bins[b] < val <= bins[b + 1]:
                    hist[b] = hist.get(b, 0) + 1
                    break
        return {key: count / sum_values for key, count in hist.items()}
    def make_hog_histogram(self, arr):
        # 18 bins with 20-degree edges from 0 to 360
        bins = np.arange(0, 380, 20, dtype=float)
        hist = defaultdict(int)
        for val in arr.ravel():
            for i in range(len(bins) - 1):
                if bins[i] < val <= bins[i + 1]:
                    hist[i] += 1
                    break
        return {key: val/sum(hist.values()) for key, val in hist.items()}
def faster_hog_histogram(self, arr):
ctr = np.unique(np.digitize(arr, bins=np.arange(0, 380, 20), right=True).ravel(), return_counts=True)
return np.vstack((ctr[0], ctr[1]/sum(ctr[1])))
def fastest_hog_histogram(self, arr):
ctr = Counter(np.digitize(arr, bins=np.arange(0, 380, 20), right=True).ravel())
return {key: val/sum(ctr.values()) for key, val in ctr.items()}
def make_angles(self, row, col, eps=1e-6):
return np.round(2*np.rad2deg(np.arctan(row/(col+eps)))+180, 2)
def do_work(self):
self.g_row, self.g_col = self.hog_gradients(self.arr)
split_cells = np.split(self.arr, self.ppc[0])
split_grads_rows = np.split(self.g_row, self.ppc[0])
split_grads_cols = np.split(self.g_col, self.ppc[0])
final = np.zeros((self.cpc[0], self.arr.shape[1], self.ppc[0]))
hist_final = []
for idx, (row, col) in enumerate(zip(split_grads_rows, split_grads_cols)):
norm_row = self.hog_normalize_block(row)
norm_col = self.hog_normalize_block(col)
angs = self.make_angles(row, col)
hist = self.make_hog_histogram(angs)
hist_final.append(hist)
final[:angs.shape[0], :angs.shape[1], idx] = angs
return final, hist_final
|
"""Setuptools Module."""
from setuptools import setup, find_packages
setup(
name="githubutils",
version="0.1",
packages=find_packages(),
install_requires=['requests'],
# metadata for upload to PyPI
author="Alexander Richards",
author_email="a.richards@imperial.ac.uk",
description="Utilities for working with github",
license="MIT",
url="https://github.com/alexanderrichards/GithubUtils"
)
|
from enum import Enum
from dagster import SolidExecutionResult, execute_solid
from dagster_utils.contrib.data_repo.typing import JobId
from hca_orchestration.solids.load_hca.ingest_metadata_type import ingest_metadata_type
from hca_orchestration.support.typing import HcaScratchDatasetName, MetadataType
class FakeEnum(Enum):
FIRST = MetadataType("first")
SECOND = MetadataType("second")
THIRD = MetadataType("third")
FakeDatasetName = HcaScratchDatasetName("fake_dataset_name")
def test_fans_out_correctly():
result: SolidExecutionResult = execute_solid(
ingest_metadata_type,
input_values={
"scratch_dataset_name": FakeDatasetName
},
run_config={
"solids": {
"ingest_metadata_type": {
"config": {
"metadata_types": FakeEnum,
"prefix": "fakepath"
},
"inputs": {
"result": [JobId("abcdef")]
}
}
}
}
)
assert result.success
expected = [e.value for e in FakeEnum]
for i, res in enumerate(result.output_value("table_fanout_result")):
assert expected[i] == res
|
from setuptools import setup
setup(
name='django-bootstrap3-datetimepicker',
packages=['bootstrap3_datetime',],
package_data={'bootstrap3_datetime': ['static/bootstrap3_datetime/css/*.css',
'static/bootstrap3_datetime/js/*.js', ]},
include_package_data=True,
version='2.3',
description='Bootstrap3 compatible datetimepicker for Django projects.',
long_description=open('README.rst').read(),
author='Nakahara Kunihiko',
author_email='nakahara.kunihiko@gmail.com',
url='https://github.com/gcaprio/django-bootstrap3-datetimepicker.git',
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Environment :: Web Environment',
'Framework :: Django',
],
zip_safe=False,
)
|
'''
Using the global view
=====================
The `addresses` view provides access to all the addresses registered in the DB,
as well as methods to create and remove them::
eth0 = ndb.interfaces['eth0']
# create an address
(ndb
.addresses
.create(address='10.0.0.1', prefixlen=24, index=eth0['index'])
.commit())
# remove it
(ndb
.addresses['10.0.0.1/24']
.remove()
.commit())
# list addresses
(ndb
.addresses
.summary()) # see also other view dump methods
Using ipaddr views
==================
Interfaces also provide address views as subsets of the global
address view::
(ndb
.interfaces['eth0']
.ipaddr
.summary())
It is possible to use the same API as with the global address view::
(ndb
.interfaces['eth0']
.ipaddr
.create(address='10.0.0.1', prefixlen=24) # index is implied
.commit())
Using interface methods
=======================
Interfaces provide also simple methods to manage addresses::
(ndb
.interfaces['eth0']
.del_ip('172.16.0.1/24') # remove an existing address
.del_ip(family=AF_INET) # remove all IPv4 addresses
.add_ip('10.0.0.1/24') # add a new IP address
.add_ip(address='10.0.0.2', prefixlen=24) # if you prefer keyword args
.set('state', 'up')
.commit())
Functions `add_ip()` and `del_ip()` return the interface object, so they
can be chained as in the example above, and the final `commit()` will
commit all the changes in the chain.
The keywords to `del_ip()` are the same object field names that may be used
in the selectors or report filters::
(ndb
.interfaces['eth0']
.del_ip(prefixlen=25) # remove all addresses with mask /25
.commit())
A match function passed to `del_ip()` works the same way as for
`addresses.dump().filter()`: it receives a named tuple as its argument,
with fields named the same way as the address object fields. So if you
want to filter addresses by a pattern or by the `prefixlen` field with a
match function, you may use::
(ndb
.interfaces['eth0']
.del_ip(lambda x: x.address.startswith('192.168'))
.commit())
(ndb
.interfaces['eth1']
.del_ip(lambda x: x.prefixlen == 25)
.commit())
An empty `del_ip()` removes all the IP addresses on the interface::
(ndb
.interfaces['eth0']
.del_ip() # flush all the IP:s
.commit())
Accessing one address details
=============================
Access an address as a separate RTNL object::
print(ndb.addresses['10.0.0.1/24'])
Please note that address objects are read-only: you may not change them,
only remove old ones and create new ones.
'''
from ..objects import RTNL_Object
from pr2modules.common import dqn2int
from pr2modules.common import basestring
from pr2modules.netlink.rtnl.ifaddrmsg import ifaddrmsg
ifaddr_spec = (ifaddrmsg
.sql_schema()
.unique_index('family',
'prefixlen',
'index',
'IFA_ADDRESS',
'IFA_LOCAL')
.foreign_key('interfaces',
('f_target', 'f_tflags', 'f_index'),
('f_target', 'f_tflags', 'f_index')))
init = {'specs': [['addresses', ifaddr_spec]],
'classes': [['addresses', ifaddrmsg]],
'event_map': {ifaddrmsg: ['addresses']}}
def norm_mask(value):
if isinstance(value, basestring):
if '.' in value:
value = dqn2int(value)
value = int(value)
return value
class Address(RTNL_Object):
table = 'addresses'
msg_class = ifaddrmsg
fields_normalize = {'prefixlen': norm_mask}
api = 'addr'
@classmethod
def _dump_where(cls, view):
if view.chain:
plch = view.ndb.schema.plch
where = '''
WHERE
main.f_target = %s AND
main.f_index = %s
''' % (plch, plch)
values = [view.chain['target'], view.chain['index']]
else:
where = ''
values = []
return (where, values)
@classmethod
def summary(cls, view):
req = '''
SELECT
main.f_target, main.f_tflags,
intf.f_IFLA_IFNAME, main.f_IFA_ADDRESS, main.f_prefixlen
FROM
addresses AS main
INNER JOIN
interfaces AS intf
ON
main.f_index = intf.f_index
AND main.f_target = intf.f_target
'''
yield ('target', 'tflags', 'ifname', 'address', 'prefixlen')
where, values = cls._dump_where(view)
for record in view.ndb.schema.fetch(req + where, values):
yield record
def mark_tflags(self, mark):
plch = (self.schema.plch, ) * 3
self.schema.execute('''
UPDATE interfaces SET
f_tflags = %s
WHERE f_index = %s AND f_target = %s
''' % plch, (mark, self['index'], self['target']))
def __init__(self, *argv, **kwarg):
kwarg['iclass'] = ifaddrmsg
self.event_map = {ifaddrmsg: "load_rtnlmsg"}
super(Address, self).__init__(*argv, **kwarg)
@staticmethod
def spec_normalize(spec):
'''
Address key normalization::
{ ... } -> { ... }
"10.0.0.1/24" -> {"address": "10.0.0.1",
"prefixlen": 24}
'''
if isinstance(spec, dict):
ret = spec
else:
ret = {}
if isinstance(spec, basestring):
addr_spec = spec.split('/')
ret['address'] = addr_spec[0]
if len(addr_spec) > 1:
ret['prefixlen'] = addr_spec[1]
return ret
def key_repr(self):
return '%s/%s %s/%s' % (self.get('target', ''),
self.get('label', self.get('index', '')),
self.get('local', self.get('address', '')),
self.get('prefixlen', ''))
|
#-*-coding:utf-8-*-
#date:2019/01/01
#@title MIT License
#
# Copyright (c) 2017 François Chollet
# Copyright (c) 2018 Joker2770
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
# Print the TensorFlow version currently in use
print(tf.__version__)
# Import and load the data
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Each image is mapped to a single label. Since the class names are not included
# in the dataset, store them here
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# The training set contains 60000 images, each represented as 28x28 pixels
#train_images.shape
#(60000, 28, 28)
# The test set contains 10000 images, each also represented as 28x28 pixels
#test_images.shape
#(10000, 28, 28)
# Preprocess the data
# Inspecting the first image in the training set shows pixel values between 0 and 255
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
# Scale these values to the range 0 to 1 before feeding them to the neural network.
# To do so, cast the image data from integer to float and divide by 255
train_images = train_images / 255.0
test_images = test_images / 255.0
# Display the first 25 images from the training set with the class name below each
# image, to verify that the data is in the correct format
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
def create_model():
    # The first layer, tf.keras.layers.Flatten, converts the image format from a
    # 2D array (28x28 pixels) to a 1D array (28 * 28 = 784 pixels). Think of this
    # layer as unstacking the rows of pixels in the image and lining them up; it has
    # no parameters to learn and only reformats the data.
    # After the pixels are flattened, the network consists of a sequence of two
    # tf.keras.layers.Dense layers. These are densely connected (fully connected)
    # neural layers. The first Dense layer has 128 nodes (neurons). The second (and
    # last) layer is a 10-node softmax layer that returns an array of 10 probability
    # scores summing to 1; each node's score is the probability that the current
    # image belongs to one of the 10 classes.
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)])
    # Loss function - measures how accurate the model is during training. We want to
    # minimize this function to "steer" the model in the right direction.
    # Optimizer - how the model is updated based on the data it sees and its loss function.
    # Metrics - used to monitor the training and testing steps. The example below uses
    # accuracy, the fraction of images that are correctly classified.
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
# include the epoch in the file name. (uses `str.format`)
checkpoint_path = "training_2/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
latest = tf.train.latest_checkpoint(checkpoint_dir)
print("Latest model:",latest)
# Create checkpoint callback
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
# Save weights, every 5-epochs.
period=5,
verbose=1)
# Create a basic model instance
model = create_model()
if latest != None:
model.load_weights(latest)
print("Load latest model: ", latest)
model.summary()
loss, acc = model.evaluate(test_images, test_labels)
print("Untrained model, accuracy: {:5.2f}%".format(100*acc))
print("Training...")
# "Fit" the model to the training data
model.fit(train_images,
train_labels,
epochs=10,
callbacks = [cp_callback],
validation_data = (test_images, test_labels))
# Evaluate accuracy
# Compare how the model performs on the test dataset
model = create_model()
if latest != None:
model.load_weights(latest)
else:
model.load_weights("training_2/cp-0005.ckpt")
test_loss, test_acc = model.evaluate(test_images, test_labels)
print("Test loss: ", test_loss)
# The accuracy on the test dataset is slightly lower than on the training dataset.
# This gap between training and test accuracy indicates overfitting.
print('Test accuracy:', test_acc)
|
# -*- coding: utf-8 -*-
from hashlib import sha1
import hmac
class Pushpad(object):
def __init__(self, auth_token, project_id):
self.auth_token = auth_token
self.project_id = project_id
def signature_for(self, data):
return hmac.new(bytes(self.auth_token.encode()), data.encode(), sha1).hexdigest()
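if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module; the token, project id
    # and payload below are made-up illustrative values. signature_for() returns
    # the HMAC-SHA1 hex digest of the payload keyed with the auth token.
    pushpad = Pushpad(auth_token='example-token', project_id=123)
    print(pushpad.signature_for('user@example.com'))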
|
from .QueuePipe import QueuePipe
import multiprocessing
from multiprocessing import Process, Queue
from typing import List, Tuple, Union, Any
from collections.abc import Callable
import time
import signal
class PayloadProcess:
def __init__(self, payload: Tuple[str, Callable, Tuple, Callable, Tuple, Union[float, None]]):
self.name, self.target, self.args, self.staticObjBuilder, self.staticObjBuilderArgs, self.qTimeout = payload
self.queue = QueuePipe()
self.actionQueue = Queue()
if self.qTimeout is not None:
self.queue = QueuePipe(timeout=self.qTimeout)
self.process = Process(name=self.name, target=self.run, args=(self.queue.inputQ, self.queue.outputQ,
self.actionQueue), daemon=True)
self.stopped = False
def run(self, inputQ, outputQ, actionQ):
# build the static arguments
qPipe = QueuePipe(inputQ, outputQ)
if self.staticObjBuilder is not None:
staticObjects = self.staticObjBuilder(self.staticObjBuilderArgs)
targetArgs = (qPipe,) + tuple(staticObjects) + self.args
else:
targetArgs = (qPipe,) + self.args
# define signal catches
self.handleSignals()
# run the loop for the target function
while not self.stopped:
self.parseActionQueue(actionQ)
if self.stopped:
break
outputs = self.target(targetArgs)
self.putOutputs(outputs)
def parseActionQueue(self, actionQueue: Union[Queue, None] = None):
actionQ = self.actionQueue if actionQueue is None else actionQueue
if not actionQ.empty():
action = actionQ.get()
if isinstance(action, float) or isinstance(action, int):
time.sleep(action)
elif isinstance(action, str):
if action == 'stop':
self.stopped = True
def start(self) -> 'PayloadProcess':
self.process.start()
return self
def status(self) -> Tuple[bool, bool]:
alive = self.process.is_alive()
        running = not self.stopped
return alive, running
def sleep(self, delay: float):
self.actionQueue.put(delay)
def stop(self):
self.actionQueue.put('stop')
def join(self, timeout: Union[float, None] = None):
if not self.process.is_alive():
return
if timeout is not None:
self.stop()
self.process.join(timeout=timeout)
else:
self.stop()
self.process.join()
def terminate(self):
if self.process.is_alive():
self.process.terminate()
def kill(self):
if self.process.is_alive():
self.process.kill()
def handleSignals(self):
signal.signal(signal.SIGINT, self.handle_sigint)
def handle_sigint(self, sig, frame):
self.stop()
# Queue functionality and interactivity
def putInputs(self, inputs: Union[List[Any], Any]):
self.queue.addInputs(inputs)
def putOutputs(self, outputs: Union[List[Any], Any]):
self.queue.addOutputs(outputs)
def getOutputs(self, timeout: Union[float, None] = None) -> List[Any]:
return self.queue.getOutputs(timeout=timeout)
def getOutput(self, timeout: Union[float, None] = None) -> Any:
return self.queue.getOutput(timeout=timeout)
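# Illustrative usage sketch, not part of the original module; `my_target` and its
# argument are hypothetical placeholders. The payload tuple is
# (name, target, args, staticObjBuilder, staticObjBuilderArgs, qTimeout), and the
# target is invoked repeatedly as target((qPipe,) + args) until a 'stop' action
# is queued:
#
#   proc = PayloadProcess(("worker", my_target, (extra_arg,), None, (), None)).start()
#   proc.putInputs([1, 2, 3])
#   outputs = proc.getOutputs()
#   proc.join(timeout=2.0)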
|
# Generated by Django 3.0.3 on 2020-05-23 12:31
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('main', '0014_auto_20200523_1236'),
]
operations = [
migrations.RemoveField(
model_name='block',
name='id',
),
migrations.RemoveField(
model_name='logic',
name='id',
),
migrations.RemoveField(
model_name='order',
name='id',
),
migrations.RemoveField(
model_name='userdata',
name='id',
),
migrations.AddField(
model_name='block',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
migrations.AddField(
model_name='logic',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
migrations.AddField(
model_name='order',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
migrations.AddField(
model_name='userdata',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
]
|
from django.urls import reverse
from django.db import models
class Weight(models.Model):
""" Weight model (measured in kg). """
id = models.AutoField(primary_key=True, editable=False)
datestamp = models.DateField(blank=True, null=True)
weight = models.FloatField(blank=True, null=True)
author = models.ForeignKey('auth.user', on_delete=models.CASCADE)
def __str__(self):
return f'{self.datestamp} - {self.weight} kg'
def get_absolute_url(self):
return reverse('weight-index')
|
from conans import ConanFile, CMake, tools
import os
class SfmlConan(ConanFile):
name = 'sfml'
description = 'Simple and Fast Multimedia Library'
topics = ('sfml', 'multimedia')
url = 'https://github.com/bincrafters/community'
homepage = 'https://github.com/SFML/SFML'
license = "ZLIB"
exports_sources = ['CMakeLists.txt', 'patches/*']
generators = 'cmake', 'cmake_find_package_multi'
settings = 'os', 'compiler', 'build_type', 'arch'
options = {
'shared': [True, False],
'fPIC': [True, False],
'window': [True, False],
'graphics': [True, False],
'network': [True, False],
'audio': [True, False],
}
default_options = {
'shared': False,
'fPIC': True,
'window': True,
'graphics': True,
'network': True,
'audio': True
}
_source_subfolder = 'source_subfolder'
_build_subfolder = 'build_subfolder'
_cmake = None
def config_options(self):
if self.settings.os == 'Windows':
self.options.remove('fPIC')
def configure(self):
if self.options.graphics:
self.options.window = True
def requirements(self):
if self.options.graphics:
self.requires('freetype/2.10.4')
self.requires('stb/20200203')
if self.options.audio:
self.requires('openal/1.21.0')
self.requires('flac/1.3.3')
self.requires('ogg/1.3.4')
self.requires('vorbis/1.3.7')
if self.options.window:
if self.settings.os == 'Linux':
self.requires('xorg/system')
self.requires('opengl/system')
def system_requirements(self):
if self.settings.os == 'Linux' and tools.os_info.is_linux:
if tools.os_info.with_apt:
installer = tools.SystemPackageTool()
packages = []
if self.options.window:
packages.extend(['libudev-dev'])
for package in packages:
installer.install(package)
def source(self):
tools.get(**self.conan_data["sources"][self.version],
strip_root=True, destination=self._source_subfolder)
tools.rmdir(os.path.join(self._source_subfolder, "extlibs"))
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions['SFML_DEPENDENCIES_INSTALL_PREFIX'] = self.package_folder
self._cmake.definitions['SFML_MISC_INSTALL_PREFIX'] = self.package_folder
self._cmake.definitions['SFML_BUILD_WINDOW'] = self.options.window
self._cmake.definitions['SFML_BUILD_GRAPHICS'] = self.options.graphics
self._cmake.definitions['SFML_BUILD_NETWORK'] = self.options.network
self._cmake.definitions['SFML_BUILD_AUDIO'] = self.options.audio
self._cmake.definitions['SFML_INSTALL_PKGCONFIG_FILES'] = False
self._cmake.definitions['SFML_GENERATE_PDB'] = False
self._cmake.definitions['SFML_USE_SYSTEM_DEPS'] = True
if self.settings.os == "Macos":
self._cmake.definitions['SFML_OSX_FRAMEWORK'] = "-framework AudioUnit"
elif self.settings.compiler == 'Visual Studio':
if self.settings.compiler.runtime == 'MT' or self.settings.compiler.runtime == 'MTd':
self._cmake.definitions['SFML_USE_STATIC_STD_LIBS'] = True
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for p in self.conan_data["patches"][self.version]:
tools.patch(**p)
with tools.vcvars(self.settings, force=True, filter_known_paths=False) if self.settings.compiler == 'Visual Studio' else tools.no_op():
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
if self.settings.os == 'Macos' and self.options.shared and self.options.graphics:
with tools.chdir(os.path.join(self.package_folder, 'lib')):
suffix = '-d' if self.settings.build_type == 'Debug' else ''
graphics_library = 'libsfml-graphics%s.%s.dylib' % (suffix, self.version)
old_path = '@rpath/../Frameworks/freetype.framework/Versions/A/freetype'
new_path = '@loader_path/../freetype.framework/Versions/A/freetype'
command = 'install_name_tool -change %s %s %s' % (old_path, new_path, graphics_library)
self.output.warn(command)
self.run(command)
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def _get_decorated_lib(self, name):
suffix = '-s' if not self.options.shared else ''
suffix += '-d' if self.settings.build_type == 'Debug' else ''
return name + suffix
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "SFML"
self.cpp_info.names["cmake_find_package_multi"] = "SFML"
self.cpp_info.names["pkg_config"] = "SFML"
self.cpp_info.components["sfml-system"].names["pkg_config"] = "sfml-system"
self.cpp_info.components["sfml-system"].names["cmake_find_package"] = "system"
self.cpp_info.components["sfml-system"].names["cmake_find_package_multi"] = "system"
self.cpp_info.components["sfml-system"].libs = [self._get_decorated_lib("sfml-system")]
if not self.options.shared:
self.cpp_info.components["sfml-system"].defines = ['SFML_STATIC']
if self.settings.os == 'Windows':
self.cpp_info.components["sfml-system"].system_libs = ['winmm']
elif self.settings.os == 'Linux':
self.cpp_info.components["sfml-system"].system_libs = ['rt']
elif self.settings.os == 'Android':
self.cpp_info.components["sfml-system"].system_libs = ['android', 'log']
if self.settings.os != 'Windows':
self.cpp_info.components["sfml-system"].system_libs = ['pthread']
if self.settings.os in ['Windows', 'Android', 'iOS']:
sfml_main_suffix = '-d' if self.settings.build_type == 'Debug' else ''
self.cpp_info.components["sfml-main"].libs = ["sfml-main" + sfml_main_suffix]
if not self.options.shared:
self.cpp_info.components["sfml-main"].defines = ['SFML_STATIC']
if self.settings.os == 'Android':
self.cpp_info.components["sfml-main"].libs.append(self._get_decorated_lib("sfml-activity"))
self.cpp_info.components["sfml-main"].system_libs = ['android', 'log']
if self.options.window or self.options.graphics:
self.cpp_info.components["sfml-window"].names["pkg_config"] = "sfml-window"
self.cpp_info.components["sfml-window"].names["cmake_find_package"] = "window"
self.cpp_info.components["sfml-window"].names["cmake_find_package_multi"] = "window"
self.cpp_info.components["sfml-window"].libs = [self._get_decorated_lib("sfml-window")]
self.cpp_info.components["sfml-window"].requires = ["opengl::opengl", "sfml-system"]
if self.settings.os in ['Linux', 'FreeBSD']:
self.cpp_info.components["sfml-window"].requires.append('xorg::xorg')
if not self.options.shared:
self.cpp_info.components["sfml-window"].defines = ['SFML_STATIC']
if self.settings.os == 'Windows':
self.cpp_info.components["sfml-window"].system_libs = ['winmm', 'gdi32']
if self.settings.os == 'Linux':
self.cpp_info.components["sfml-window"].system_libs = ['udev']
if self.settings.os == 'FreeBSD':
self.cpp_info.components["sfml-window"].system_libs = ['usbhid']
elif self.settings.os == "Macos":
self.cpp_info.components["sfml-window"].frameworks.extend(['Foundation', 'AppKit', 'IOKit', 'Carbon'])
if not self.options.shared:
self.cpp_info.components["sfml-window"].exelinkflags.append("-ObjC")
self.cpp_info.components["sfml-window"].sharedlinkflags = self.cpp_info.components["sfml-window"].exelinkflags
elif self.settings.os == "iOS":
self.cpp_info.frameworks.extend(['Foundation', 'UIKit', 'CoreGraphics', 'QuartzCore', 'CoreMotion'])
elif self.settings.os == "Android":
self.cpp_info.components["sfml-window"].system_libs = ['android']
if self.options.graphics:
self.cpp_info.components["sfml-graphics"].names["pkg_config"] = "sfml-graphics"
self.cpp_info.components["sfml-graphics"].names["cmake_find_package"] = "graphics"
self.cpp_info.components["sfml-graphics"].names["cmake_find_package_multi"] = "graphics"
self.cpp_info.components["sfml-graphics"].libs = [self._get_decorated_lib("sfml-graphics")]
self.cpp_info.components["sfml-graphics"].requires = ["freetype::freetype", "stb::stb", "sfml-window"]
if not self.options.shared:
self.cpp_info.components["sfml-graphics"].defines = ['SFML_STATIC']
if self.settings.os == 'Linux':
self.cpp_info.components["sfml-graphics"].system_libs = ['udev']
if self.options.network:
self.cpp_info.components["sfml-network"].names["pkg_config"] = "sfml-network"
self.cpp_info.components["sfml-network"].names["cmake_find_package"] = "network"
self.cpp_info.components["sfml-network"].names["cmake_find_package_multi"] = "network"
self.cpp_info.components["sfml-network"].libs = [self._get_decorated_lib("sfml-network")]
self.cpp_info.components["sfml-network"].requires = ["sfml-system"]
if not self.options.shared:
self.cpp_info.components["sfml-network"].defines = ['SFML_STATIC']
if self.settings.os == 'Windows':
self.cpp_info.components["sfml-network"].system_libs = ['ws2_32']
if self.options.audio:
self.cpp_info.components["sfml-audio"].names["pkg_config"] = "sfml-audio"
self.cpp_info.components["sfml-audio"].names["cmake_find_package"] = "audio"
self.cpp_info.components["sfml-audio"].names["cmake_find_package_multi"] = "audio"
self.cpp_info.components["sfml-audio"].libs = [self._get_decorated_lib("sfml-audio")]
self.cpp_info.components["sfml-audio"].requires = ["openal::openal", "flac::flac", "ogg::ogg", "vorbis::vorbis", "sfml-system"]
if not self.options.shared:
self.cpp_info.components["sfml-audio"].defines = ['SFML_STATIC']
if self.settings.os == "Android":
self.cpp_info.components["sfml-audio"].system_libs = ['android']
|
from pathlib import Path
from tempfile import TemporaryDirectory
from urllib.parse import urljoin
import pytest
from django.conf import settings
from snoop import models, walker, views
pytestmark = [
pytest.mark.django_db,
pytest.mark.skipif(not settings.DATABASES, reason="DATABASES not set"),
]
def _collection(**kwargs):
col = models.Collection.objects.create(**kwargs)
walker.Walker.walk(
root=col.path,
prefix=None,
container_doc=None,
collection=col,
)
return col
@pytest.fixture
def testdata():
return _collection(
slug='apitest',
path=settings.SNOOP_ROOT + '/eml-2-attachment',
)
def test_get_data(testdata):
email = testdata.document_set.get(path='message-without-subject.eml')
data = views._process_document(testdata.slug, email.id)
content = data['content']
assert content['date'] == '2013-10-10T10:04:49-07:00'
assert content['filetype'] == 'email'
assert content['filename'] == 'message-without-subject.eml'
assert content['path'] == 'message-without-subject.eml'
assert content['from'] == \
'Negoita Camelia <don.t_mess_with_miky@yahoo.com>'
assert content['md5'] == '2008f17802012f11fc4b35234a4af672'
def test_attachments(testdata):
from snoop.digest import create_children, digest
email = testdata.document_set.get(path='message-without-subject.eml')
new_children = create_children(email, digest(email), True)
data = views._process_document(testdata.slug, email.id)
children = data['children']
assert len(children) == 2
assert children[0]['filename'] == 'IMAG1077.jpg'
assert children[0]['content_type'] == 'image/jpeg'
@pytest.fixture
def mockdata():
with TemporaryDirectory() as tmp:
root = Path(tmp) / 'mock'
root.mkdir()
for n in range(42):
with (root / 'doc_{}.txt'.format(n)).open('w') as f: pass
col = _collection(slug='mock', path=str(root), title="Mock")
from snoop.digest import worker
for doc in col.document_set.all():
worker(doc.id, False)
yield col
def test_collection_metadata_and_feed(mockdata, client):
col_url = '/mock/json'
col = client.get(col_url).json()
assert col['title'] == "Mock"
def feed_page(url):
page = client.get(url).json()
next_url = urljoin(url, page['next']) if page.get('next') else None
return next_url, page['documents']
docs = []
feed_url = urljoin(col_url, col['feed'])
while feed_url:
feed_url, page_docs = feed_page(feed_url)
docs.extend(page_docs)
expected_paths = {''} ^ {"doc_{}.txt".format(n) for n in range(42)}
assert {d['content']['path'] for d in docs} == expected_paths
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# this
import utils
def vectorizer(labeled_message, term_voc, doc_voc):
"""
labeled_message: dict
        dictionary with the following fields: {score, id, terms, features}
term_voc : core.TermVocabulary
doc_voc : core.DocVocabulary
returns : dict
vector {index1: value1, ... , indexN: valueN}
"""
return feature_vectorizer(labeled_message['features'], term_voc)
def feature_vectorizer(features, term_voc):
"""
Produces vector of features
term_voc : core.TermVocabulary
returns: dict
vector {index1: value1, ..., indexN: valueN}
"""
vector = {}
for feature_name in features.keys():
if not term_voc.contains(feature_name):
term_voc.insert_term(feature_name)
index = term_voc.get_term_index(feature_name)
vector[index] = features[feature_name]
return vector
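# Illustrative sketch, not part of the original module; the feature names and the
# TermVocabulary instance below are hypothetical. feature_vectorizer() assigns each
# unseen feature name an index in term_voc and returns {index: value}:
#
#   term_voc = core.TermVocabulary()
#   feature_vectorizer({'length': 3, 'has_caps': 1}, term_voc)
#   # -> e.g. {0: 3, 1: 1}; the exact indices depend on the vocabulary state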
if __name__ == "__main__":
utils.vectorization_core(vectorizer, init_term_vocabulary=False)
|
# -*- coding: utf-8 -*-
"""
@author: Rajesh
"""
from typing import List, Tuple
import imageio
from io import BytesIO
from PIL import Image
import hydra
from omegaconf import open_dict, DictConfig
from typing import Mapping
import numpy as np
import os
import streamlit as st
from dataloader.ufs_data import UnknownTestData
import torch
from torchvision import transforms
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter("ignore")
@st.cache(
hash_funcs={torch.jit._script.RecursiveScriptModule: id}, allow_output_mutation=True
)
def load_model(
model_path: str = "saved_models/traced_pspnet_model.pt",
) -> torch.ScriptModule:
"""Loads and returns the torchscript model from path.
Parameters
----------
model_path : str, optional
        The path to the torchscript model, by default
        "saved_models/traced_pspnet_model.pt"
Returns
-------
torch.ScriptModule
The loaded torchscript model object.
"""
return torch.jit.load(model_path, map_location="cpu")
@st.cache(
hash_funcs={torch.jit._script.RecursiveScriptModule: id}, allow_output_mutation=True
)
def get_inference_on_bytes_image(
model: torch.ScriptModule, image_file: BytesIO
) -> Tuple[Image.Image, Image.Image]:
"""Runs model inference on a BytesIO image stream.
Parameters
----------
model : torch.ScriptModule
The TorchScript loaded module.
image_file : BytesIO
The image file uploaded returned as a BytesIO object.
Returns
-------
Tuple[Image.Image, Image.Image]
A tuple of input image to the model and the output from the model.
"""
if isinstance(image_file, st.uploaded_file_manager.UploadedFile):
raw_image = Image.open(image_file).convert("L")
else:
raw_image = Image.fromarray(image_file).convert("L")
# print(f"model type {type(model)}")
image = torch.tensor(np.array(raw_image)).unsqueeze(dim=0).unsqueeze(dim=0)
# print(image.dtype)
img = transforms.Resize(size=(512, 512), interpolation=Image.BILINEAR)(image)
img = img.float()
output = model(img).squeeze()
print(f"output shape is {output.dtype}")
# return Image.fromarray(image[0].detach().numpy()), Image.fromarray(
# output[0].detach().numpy()
# )
return img.detach().numpy().squeeze().astype(np.uint8), output.detach().numpy()
def get_config_from_sidebar() -> dict:
"""Defines and returns the configuration in the sidebar.
Returns
-------
dict
A dict of the different inputs chosen by the user.
"""
st.sidebar.markdown("## Upload image/video data and annotation")
upload_video = st.sidebar.checkbox("Upload video?", value=False)
with st.sidebar.beta_expander("See explanation"):
st.write(
"""
If True, you can upload video.
If False, you can upload images.
"""
)
if upload_video:
video_file = st.sidebar.file_uploader(
"Upload your video here (mp4, avi).", type=["mp4", "avi"]
)
if video_file is None:
st.stop()
video = imageio.get_reader(video_file, "ffmpeg")
image_files = []
for image in video.iter_data():
image_files.append(image)
else:
image_files = st.sidebar.file_uploader(
"Upload your image/s here.",
type=["png", "jpg", "jpeg"],
accept_multiple_files=True,
)
if len(image_files) == 0:
st.stop()
elif len(image_files) == 1:
image_idx = 0
else:
num_images = len(image_files)
image_idx = st.sidebar.slider("Choose an image index", 0, num_images - 1, 0)
fit_to_column = st.sidebar.checkbox("Fit images to column width")
return {
"image": image_files[image_idx],
"fit_to_column": fit_to_column,
}
def sidebar(cfg: Mapping):
# add a sidebar heading
st.sidebar.markdown("# Choose images")
# possible model options
model_options = ["pspnet", "unet", "deeplabv3"]
# default dataset index
default_model_idx = 0 # corresponds to pspnet model
# dataset to choose
dataset = st.sidebar.selectbox(
"Which Model to choose?", model_options, default_model_idx
)
transform = transforms.Compose(
[
transforms.Resize(size=(cfg.data.width, cfg.data.height), interpolation=Image.BILINEAR),
transforms.ToTensor(),
]
)
dataset_obj = UnknownTestData(
cfg,
data_root=cfg.st_data.data_dir,
bbox_dir=None, # cfg.st_data.bbox_dir
transform=transform,
)
# get number of images in the dataset
num_images = len(dataset_obj)
# get image index
image_idx = st.sidebar.slider("Choose an image index", 0, num_images - 1, 0)
print(f"image idx is {image_idx}")
# get image path
image_path = dataset_obj.img_files[image_idx]
# get processed image and target
image = dataset_obj[image_idx]
return image, image_idx, image_path
@hydra.main(config_name="../config")
def main(cfg: DictConfig):
"""Main function for the script.
Defines the user input configuration, loads and runs the model and visualizes
the output.
"""
model_path = cfg.st_data.model_path
save_path = cfg.st_data.save_path
image, image_idx, image_path = sidebar(cfg)
model = load_model(model_path=model_path)
# construct UI layout
st.title("Semantic Segmentation of Aquatic Organisms in Underwater Videos")
# get the raw image to display on the web page
raw_image = Image.open(image_path).convert("L")
# get the model prediction
prediction = model(image.unsqueeze(dim=0)).detach().squeeze().numpy()
prediction = np.where(prediction > 0.6, 255, 0).astype(np.uint8)
resized_pred = Image.fromarray(prediction).resize(size=(raw_image.width, raw_image.height))
st.image(
[raw_image, resized_pred],
caption=["original image", "predicted mask"],
width=224
# use_column_width=True,
)
# st.image(
# prediction,
# caption="predicted mask",
# width=224
# # use_column_width=True,
# )
is_ok = st.sidebar.button(label="Segmentation OK?")
if is_ok:
os.makedirs(save_path, exist_ok=True)
out_path = os.path.join(save_path, os.path.basename(image_path[:-3]) + "png")
resized_pred.save(out_path)
st.sidebar.write("Mask saved as ground-truth")
else:
# st.sidebar.write("Mask is not perfect and is not saved.")
print("Mask is not perfect and is not saved.")
if __name__ == "__main__":
main()
|
# finds files that were not successfully converted; when I ran the code,
# this revealed 14, all of which had misformed URLs. I corrected them
# manually and re-ran the import
import os
GML_PATH = './spain_catastre/gml'
if __name__ == '__main__':
for f in os.listdir(GML_PATH):
parts = f.split('.')
if parts[-1] == 'zip':
csv_filename = ('.'.join(parts[:-1])) + '.csv'
if not os.path.exists('./build/' + csv_filename):
                print('%s/%s' % (GML_PATH, f))
|
#!/usr/bin/env python2
import socket
import struct
import sys
# Monkey patch for:
# - SOCK_SEQPACKET
# - IPPROTO_SCTP
# - bindx
if not hasattr(socket, 'SOCK_SEQPACKET'):
socket.SOCK_SEQPACKET = 5
if not hasattr(socket, 'IPPROTO_SCTP'):
socket.IPPROTO_SCTP = 132
SCTP_BINDX_ADD_ADDR = 1
SCTP_BINDX_REM_ADDR = 2
# Load libsctp shared library
# and resolve sctp_bindx
# under x64-64 Debian 9, it resolves to /usr/lib/x86_64-linux-gnu/libsctp.so.1
import ctypes
libsctp = None
if libsctp is None:
try:
libsctp = ctypes.CDLL('libsctp.so')
except:
pass
if libsctp is None:
try:
libsctp = ctypes.CDLL('libsctp.so.1')
except:
pass
if libsctp is None:
print('could not load SCTP shared library. Will now exit')
sys.exit(1)
assert(libsctp is not None)
real_bindx = libsctp.sctp_bindx
assert(real_bindx)
real_connectx = libsctp.sctp_connectx
assert(real_connectx)
# sockaddr_in structure
class SOCKADDR_IN(ctypes.Structure):
_fields_ = [
("sin_family", ctypes.c_uint16),
("sin_port", ctypes.c_uint16),
("sin_addr", ctypes.c_uint32),
("sin_zero", ctypes.c_byte*8),
]
class SOCKADDR_IN6(ctypes.Structure):
_fields_ = [
("sin6_family", ctypes.c_uint16),
("sin6_port", ctypes.c_uint16),
("sin6_flowinfo", ctypes.c_uint32),
("sin6_addr", ctypes.c_byte*16),
("sin6_scope_id", ctypes.c_uint32),
]
def bindx(f, addrs, family=socket.AF_INET):
assert(family in (socket.AF_INET, socket.AF_INET6))
if family == socket.AF_INET:
ADDRS_IN = SOCKADDR_IN * len(addrs)
else:
ADDRS_IN = SOCKADDR_IN6 * len(addrs)
addrs_in = ADDRS_IN()
for i in range(len(addrs)):
(addr, port) = addrs[i]
if family == socket.AF_INET:
addrs_in[i].sin_family = family
addrs_in[i].sin_port = socket.htons(port)
addrs_in[i].sin_addr = struct.unpack('<I', socket.inet_aton(addr))[0]
else:
addrs_in[i].sin6_family = family
addrs_in[i].sin6_port = socket.htons(port)
addrs_in[i].sin6_flowinfo = 0
            # copy the packed 16-byte IPv6 address into the ctypes byte array
            ctypes.memmove(addrs_in[i].sin6_addr, socket.inet_pton(family, addr), 16)
addrs_in[i].sin6_scope_id = 0
return real_bindx(f.fileno(), addrs_in, len(addrs_in), SCTP_BINDX_ADD_ADDR)
def connectx(f, addrs, family=socket.AF_INET):
    assert(family in (socket.AF_INET, socket.AF_INET6))
    if family == socket.AF_INET:
        ADDRS_IN = SOCKADDR_IN * len(addrs)
    else:
        ADDRS_IN = SOCKADDR_IN6 * len(addrs)
    addrs_in = ADDRS_IN()
    for i in range(len(addrs)):
        (addr, port) = addrs[i]
        if family == socket.AF_INET:
            addrs_in[i].sin_family = family
            addrs_in[i].sin_port = socket.htons(port)
            addrs_in[i].sin_addr = struct.unpack('<I', socket.inet_aton(addr))[0]
        else:
            addrs_in[i].sin6_family = family
            addrs_in[i].sin6_port = socket.htons(port)
            addrs_in[i].sin6_flowinfo = 0
            # copy the packed 16-byte IPv6 address into the ctypes byte array
            ctypes.memmove(addrs_in[i].sin6_addr, socket.inet_pton(family, addr), 16)
            addrs_in[i].sin6_scope_id = 0
    assoc = ctypes.c_int(0)  # association id written back by sctp_connectx
    ret = real_connectx(f.fileno(), addrs_in, len(addrs_in), ctypes.byref(assoc))
    return ret
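# Illustrative usage sketch, not part of the original module; the loopback
# addresses and port are made-up values. bindx() binds one SCTP socket to
# several local addresses at once:
#
#   s = socket.socket(socket.AF_INET, socket.SOCK_SEQPACKET, socket.IPPROTO_SCTP)
#   bindx(s, [('127.0.0.1', 9999), ('127.0.0.2', 9999)])
#   s.listen(5)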
|
"""
Capstone Project. Code to run on the EV3 robot (NOT on a laptop).
Author: Your professors (for the framework)
  and Yicheng Yang.
Winter term, 2018-2019.
"""
import M2_rosebot
import mqtt_remote_method_calls as com
import time
import m2_gui_delegate
def main():
"""
This code, which must run on the EV3 ROBOT:
      1. Makes the EV3 robot do various things.
2. Communicates via MQTT with the GUI code that runs on the LAPTOP.
"""
real_run()
# go_along_black()
# go_straight_for_seconds(99999,50)
def real_run():
robot = M2_rosebot.RoseBot()
delegate = m2_gui_delegate.ResponderToGUIMessages(robot)
mqtt_receiver = com.MqttClient(delegate)
mqtt_receiver.connect_to_pc()
delegate.mqtt_sender=mqtt_receiver
while True:
if delegate.stop_program:
break
time.sleep(0.01)
# def main():
# robot = rosebot.RoseBot()
# rc = RemoteControlEtc(robot)
# client = com.MqttClient(rc)
# client.connect_to_pc()
# rc.client = client
#
# while True:
# if robot.color_sensor.get_color() == 6:
# time.sleep(1.5)
# robot.drive_system.turn_degrees(-95)
# if robot.proximity_sensor.get_distance_to_nearest_object() <= 10:
# ev3.Sound.speak('mission complete').wait()
# robot.drive_system.stop_moving()
# time.sleep(0.01) # For the delegate to do its work
# real_run()
# run_test_arm()
# run_caliberate_arm()
# run_mov3_arm_to_position(5000)
# lower_arm()
# go(100, 50)
# stop()
# go_straight_for_seconds(10,70)
# go_straight_for_inches_using_time(30, 50)
# class RemoteControlEtc(object):
# def __init__(self, robot):
# """
#
# Stores the robot.
# :type robot: rb.Snatch3rRobot
# """
# self.robot = robot
# self.client = None
# pass
# def go():
# def run_test_arm():
# robot=M2_rosebot.RoseBot()
# robot.arm_and_claw.raise_arm()
#
# def run_caliberate_arm():
# robot=M2_rosebot.RoseBot()
# print('running')
# robot.arm_and_claw.calibrate_arm()
#
# def run_mov3_arm_to_position(pos):
# robot=M2_rosebot.RoseBot()
# robot.arm_and_claw.move_arm_to_position(pos)
#
# def lower_arm():
# robot =M2_rosebot.RoseBot()
# robot.arm_and_claw.lower_arm()
# def go(left, right):
# robot = M2_rosebot.RoseBot()
# robot.drive_system.go(left , right)
# def stop():
# robot = M2_rosebot.RoseBot()
# robot.drive_system.stop()
# def go_straight_for_seconds(second, speed):
# print('running')
# robot = M2_rosebot.RoseBot()
# robot.drive_system.go_straight_for_seconds(second, speed)
# def go_straight_for_inches_using_time(inch, speed):
# robot = M2_rosebot.RoseBot()
# robot.drive_system.go_straight_for_inches_using_time(inch, speed)
# def go_straight_for_inches_using_encoder(inch, speed):
# robot = M2_rosebot.RoseBot()
# robot.drive_system.go_straight_for_inches_using_encoder(inch, speed)
# def beeper(time):
# robot = M2_rosebot.RoseBot()
# robot.sound_system.beeper.beep(time)
# def tone_make(frequency, duration):
# robot = M2_rosebot.RoseBot()
# robot.sound_system.tone_maker.play_tone(frequency,duration).wait()
#
#
# def speak(str):
# robot = M2_rosebot.RoseBot()
# robot.sound_system.speech_maker.speak(str)
#
#
# def go_and_increase_frequency(speed,frequency_step):
# robot = M2_rosebot.RoseBot()
# robot.drive_system.go_and_increase_frequency(speed,frequency_step)
# def go_along_black():
# robot = M2_rosebot.RoseBot()
# robot.drive_system.go_along_black_line()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
|
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, MutableMapping, Optional
import yaml
from google.protobuf import json_format
from google.protobuf.json_format import MessageToDict, MessageToJson
from google.protobuf.timestamp_pb2 import Timestamp
from feast.loaders import yaml as feast_yaml
from feast.protos.feast.core.Entity_pb2 import Entity as EntityV2Proto
from feast.protos.feast.core.Entity_pb2 import EntityMeta as EntityMetaProto
from feast.protos.feast.core.Entity_pb2 import EntitySpecV2 as EntitySpecProto
from feast.usage import log_exceptions
from feast.value_type import ValueType
class Entity:
"""
Represents a collection of entities and associated metadata.
"""
@log_exceptions
def __init__(
self,
name: str,
value_type: ValueType = ValueType.UNKNOWN,
description: str = "",
join_key: Optional[str] = None,
labels: Optional[MutableMapping[str, str]] = None,
):
self._name = name
self._description = description
self._value_type = value_type
if join_key:
self._join_key = join_key
else:
self._join_key = name
self._labels: MutableMapping[str, str]
if labels is None:
self._labels = dict()
else:
self._labels = labels
self._created_timestamp: Optional[Timestamp] = None
self._last_updated_timestamp: Optional[Timestamp] = None
def __eq__(self, other):
if not isinstance(other, Entity):
raise TypeError("Comparisons should only involve Entity class objects.")
if (
self.labels != other.labels
or self.name != other.name
or self.description != other.description
or self.value_type != other.value_type
or self.join_key != other.join_key
):
return False
return True
def __str__(self):
return str(MessageToJson(self.to_proto()))
@property
def name(self):
"""
Returns the name of this entity
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this entity
"""
self._name = name
@property
def description(self):
"""
Returns the description of this entity
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this entity
"""
self._description = description
@property
def join_key(self):
"""
Returns the join key of this entity
"""
return self._join_key
@join_key.setter
def join_key(self, join_key):
"""
Sets the join key of this entity
"""
self._join_key = join_key
@property
def value_type(self) -> ValueType:
"""
Returns the type of this entity
"""
return self._value_type
@value_type.setter
def value_type(self, value_type: ValueType):
"""
Set the type for this entity
"""
self._value_type = value_type
@property
def labels(self):
"""
Returns the labels of this entity. This is the user defined metadata
defined as a dictionary.
"""
return self._labels
@labels.setter
def labels(self, labels: MutableMapping[str, str]):
"""
Set the labels for this entity
"""
self._labels = labels
@property
def created_timestamp(self):
"""
Returns the created_timestamp of this entity
"""
return self._created_timestamp
@property
def last_updated_timestamp(self):
"""
Returns the last_updated_timestamp of this entity
"""
return self._last_updated_timestamp
def is_valid(self):
"""
Validates the state of a entity locally. Raises an exception
if entity is invalid.
"""
if not self.name:
raise ValueError("No name found in entity.")
if not self.value_type:
raise ValueError("No type found in entity {self.value_type}")
@classmethod
def from_yaml(cls, yml: str):
"""
Creates an entity from a YAML string body or a file path
Args:
yml: Either a file path containing a yaml file or a YAML string
Returns:
Returns a EntityV2 object based on the YAML file
"""
return cls.from_dict(feast_yaml.yaml_loader(yml, load_single=True))
@classmethod
def from_dict(cls, entity_dict):
"""
Creates an entity from a dict
Args:
entity_dict: A dict representation of an entity
Returns:
Returns a EntityV2 object based on the entity dict
"""
entity_proto = json_format.ParseDict(
entity_dict, EntityV2Proto(), ignore_unknown_fields=True
)
return cls.from_proto(entity_proto)
@classmethod
def from_proto(cls, entity_proto: EntityV2Proto):
"""
Creates an entity from a protobuf representation of an entity
Args:
entity_proto: A protobuf representation of an entity
Returns:
Returns a EntityV2 object based on the entity protobuf
"""
entity = cls(
name=entity_proto.spec.name,
description=entity_proto.spec.description,
value_type=ValueType(entity_proto.spec.value_type),
labels=entity_proto.spec.labels,
join_key=entity_proto.spec.join_key,
)
entity._created_timestamp = entity_proto.meta.created_timestamp
entity._last_updated_timestamp = entity_proto.meta.last_updated_timestamp
return entity
def to_proto(self) -> EntityV2Proto:
"""
Converts an entity object to its protobuf representation
Returns:
EntityV2Proto protobuf
"""
meta = EntityMetaProto(
created_timestamp=self.created_timestamp,
last_updated_timestamp=self.last_updated_timestamp,
)
spec = EntitySpecProto(
name=self.name,
description=self.description,
value_type=self.value_type.value,
labels=self.labels,
join_key=self.join_key,
)
return EntityV2Proto(spec=spec, meta=meta)
def to_dict(self) -> Dict:
"""
Converts entity to dict
Returns:
Dictionary object representation of entity
"""
entity_dict = MessageToDict(self.to_proto())
# Remove meta when empty for more readable exports
if entity_dict["meta"] == {}:
del entity_dict["meta"]
return entity_dict
def to_yaml(self):
"""
Converts a entity to a YAML string.
Returns:
Entity string returned in YAML format
"""
entity_dict = self.to_dict()
return yaml.dump(entity_dict, allow_unicode=True, sort_keys=False)
def to_spec_proto(self) -> EntitySpecProto:
"""
Converts an EntityV2 object to its protobuf representation.
Used when passing EntitySpecV2 object to Feast request.
Returns:
EntitySpecV2 protobuf
"""
spec = EntitySpecProto(
name=self.name,
description=self.description,
value_type=self.value_type.value,
labels=self.labels,
join_key=self.join_key,
)
return spec
def _update_from_entity(self, entity):
"""
Deep replaces one entity with another
Args:
entity: Entity to use as a source of configuration
"""
self.name = entity.name
self.description = entity.description
self.value_type = entity.value_type
self.labels = entity.labels
self.join_key = entity.join_key
self._created_timestamp = entity.created_timestamp
self._last_updated_timestamp = entity.last_updated_timestamp
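if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module; the entity name and
    # description below are illustrative values only.
    driver = Entity(name="driver_id", value_type=ValueType.INT64, description="Driver ID")
    print(driver.to_yaml())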
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from seqtalk.binary_neurons import StochasticBinaryActivation
from collections import namedtuple
import numpy as np
TrainingParams = namedtuple('TrainingParams', [
'minibatch_size',
'num_epochs',
'minibatches_per_epoch',
'processor',
])
def load_tensor(kind, tensor):
if kind.lower() == 'gpu':
        return tensor.cuda()
return tensor.cpu()
class TrainableModel(nn.Module):
@classmethod
def default_params(cls):
return TrainingParams(100, 5, 1000, 'gpu')
@classmethod
def prep_model(cls, data_source, training_params):
model = cls(data_source.input_length * data_source.alphabet_size)
load_tensor(training_params.processor, model)
model.zero_grad()
data_source.reset()
return model
@classmethod
def optimizer(cls, model, lr=None, momentum=None, sgd=True):
if sgd:
lr = lr if lr is not None else 0.1
momentum = momentum if momentum is not None else 0.9
return torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
@classmethod
def loss(cls):
return torch.nn.MSELoss(reduction='sum')
@classmethod
def train(cls, data_source, training_params=None):
training_params = cls.default_params() if training_params is None else training_params
model = cls.prep_model(data_source, training_params)
optimizer = cls.optimizer(model)
        loss_fn = cls.loss()
for epoch in range(training_params.num_epochs):
epoch_loss = 0
for batch_num in range(training_params.minibatches_per_epoch):
batch_loss = cls.train_minibatch(
epoch, batch_num, model,
optimizer, loss_fn,
data_source, training_params
)
epoch_loss += batch_loss
yield model, (epoch, epoch_loss.cpu().data)
@classmethod
def train_minibatch(cls,
epoch_num, batch_num, model,
optimizer, loss_fn,
data_source, training_params):
batch = data_source.next_batch(
training_params.minibatch_size, flat=True, type=np.float64
)
batch = load_tensor(training_params.processor, torch.FloatTensor(batch))
optimizer.zero_grad()
pred = model(batch)
loss = loss_fn(pred, batch)
loss.backward()
        optimizer.step()
return loss
class SimpleContinuousNet(TrainableModel):
"""A very simple network that should learn the identity matrix
with a single layer of ReLU neurons.
"""
def __init__(self, input_size):
        super(SimpleContinuousNet, self).__init__()
self.fc1 = nn.Linear(input_size, input_size, bias=False)
self.act = StochasticBinaryActivation()
self.sig = torch.nn.Sigmoid()
def forward(self, x):
x = self.fc1(x)
x = x.clamp(min=0)
x = self.sig(x)
return x
@classmethod
def train_minibatch(cls,
epoch_num, batch_num, model,
optimizer, loss_fn,
data_source, training_params):
batch = data_source.train.next_batch(
training_params.minibatch_size, flat=True, type=np.float64
)
batch = load_tensor(training_params.processor, torch.FloatTensor(batch))
optimizer.zero_grad()
loss = 0
for seq in batch:
pred = model(seq)
pred, seq = data_source.unflatten(pred, seq)
                pred = F.softmax(pred, dim=-1)
loss += loss_fn(pred, seq)
loss.backward()
        optimizer.step()
return loss
class SimpleBinaryNet(TrainableModel):
def __init__(self, input_size):
super(SimpleBinaryNet, self).__init__()
self.fc1 = nn.Linear(input_size, input_size, bias=False)
self.act = StochasticBinaryActivation()
self.sig = torch.nn.Sigmoid()
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
return x
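# Illustrative training-loop sketch, not part of the original module; `data_source`
# is a hypothetical object providing input_length, alphabet_size, reset() and
# next_batch(), as assumed by TrainableModel above:
#
#   for model, (epoch, epoch_loss) in SimpleBinaryNet.train(data_source):
#       print(epoch, float(epoch_loss))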
|
#!/usr/local/bin/python3
import time
import os
import sys
import socks
import socket
import urllib.request
from log import *
print ('''\033[1;31m \n
_ _
| | (_)
__ _ _ _| |_ ___ _ __ _ _ __
/ _` | | | | __/ _ \| '__| | '_ \
| (_| | |_| | || (_) | | | | |_) |
\__,_|\__,_|\__\___/|_| |_| .__/
| |
|_|
Automatically Change Ip Address Tor #noobteam
''')
print ("\033[1;34m[*]___author___: @noobpk\033[1;37m")
print ("\033[1;34m[*]___version___: 1.2\033[1;37m")
print ("")
def detect_platform():
try:
platforms = {
'linux' : 'Linux',
'linux1' : 'Linux',
'linux2' : 'Linux',
'darwin' : 'OS X',
}
logger.info("[*] Detect Platform")
if sys.platform not in platforms:
logger.error("[x_x] Your platform currently does not support.")
sys.exit(0)
except Exception as e:
logger.error("[x_x] Something went wrong, please check your error message.\n Message - {0}".format(e))
def check_exists_tor_ip(ipTor):
    with open('listip.txt', 'r') as read_obj:
        for line in read_obj:
            if ipTor in line:
                return True
    return False
def check_runon_osx():
try:
logger.info("[+] Check Requirement")
istor = os.system('command -v tor 1> /dev/null')
istorsocks = os.system ('command -v torsocks 1> /dev/null')
isprivoxy = os.path.isdir("/usr/local/Cellar/privoxy")
file_listip = os.path.isfile("listip.txt")
if (istor != 0):
logger.info("Installing Tor...")
os.system('brew install tor')
check_runon_osx()
if (istorsocks != 0):
logger.info("[+] Installing TorSocks...")
os.system('brew install torsocks')
check_runon_osx()
if (isprivoxy == False):
logger.info("[+] Installing Privoxy...")
os.system('brew install privoxy')
check_runon_osx()
if (file_listip == False):
logger.info("[+] Creating File listip.txt")
open("listip.txt", "w")
check_runon_osx()
else:
logger.info("[*] Tor has been install")
logger.info("[*] TorSocks has been install")
logger.info("[*] Privoxy has been install")
logger.info("[*] File listip.txt has been created")
startservice_osx()
except Exception as e:
logger.error("[x_x] Something went wrong, please check your error message.\n Message - {0}".format(e))
def startservice_osx():
try:
logger.info("[*] Current IP Addresss")
currentip = os.system("wget -qO- http://ipecho.net/plain 2> /dev/null ; echo")
logger.info("[*] Start service Tor")
os.system("brew services start tor")
time.sleep(2)
logger.info("[*] Start service Privoxy")
os.system("brew services start privoxy")
time.sleep(2)
logger.info("[*] Change your SOCKES to 127.0.0.1:9050")
logger.info("[+] Set time stamp")
sec = int(input("[?] Time to auto change ip by second (default 600s):") or "600")
loop = int(input("[?] Number of loop (default 144):") or "144")
for i in range(loop):
logger.info("[*] Change New IP")
os.system("brew services restart tor")
time.sleep(2)
#currentip = os.system("torsocks wget -qO- http://ipecho.net/plain 2> /dev/null ; echo")
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
socket.socket = socks.socksocket
url = 'http://ipecho.net/plain'
request = urllib.request.Request(url)
request.add_header('Cache-Control','max-age=0')
response = urllib.request.urlopen(request)
newip = response.read().decode('utf-8')
if check_exists_tor_ip(newip):
                restart_tor_service_osx()
else:
f = open('listip.txt', 'a+')
f.write(newip + '\n')
logger.info("[*] Successfully - Your IP has been Changed")
logger.info("[*] New IP Addresss:")
print(newip)
time.sleep(sec)
logger.info("[#] The loop has finished refreshing it")
stopservice_osx()
except Exception as e:
logger.error("[x_x] Something went wrong, please check your error message.\n Message - {0}".format(e))
def restart_tor_service_osx():
logger.warning("Your IP Already Exist - Request New IP")
os.system("brew services restart tor")
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
socket.socket = socks.socksocket
url = 'http://ipecho.net/plain'
request = urllib.request.Request(url)
request.add_header('Cache-Control','max-age=0')
response = urllib.request.urlopen(request)
newip = response.read().decode('utf-8')
    if check_exists_tor_ip(newip):
        restart_tor_service_osx()
    else:
        with open('listip.txt', 'a+') as f:
            f.write(newip + '\n')
        logger.info("[*] Successfully - Your IP has been changed")
        logger.info("[*] New IP Address:")
        print(newip)
def stopservice_osx():
try:
logger.info("[+] Stop service Tor")
os.system("brew services stop tor")
time.sleep(2)
logger.info("[+] Stop service Privoxy")
os.system("brew services stop privoxy")
os.system("clear")
except Exception as e:
logger.error("[x_x] Something went wrong, please check your error message.\n Message - {0}".format(e))
def check_runon_linux():
try:
logger.info("[+] Check Requirement")
istor = os.system('command -v tor 1> /dev/null')
istorsocks = os.system('command -v torsocks 1> /dev/null')
isprivoxy = os.system('command -v privoxy 1> /dev/null')
file_listip = os.path.isfile("listip.txt")
if (os.geteuid() != 0):
logger.error("[x_x] You need to have root privileges to run this script")
sys.exit(0)
        if istor != 0:
            logger.info("[+] Installing Tor")
            os.system('apt-get install tor -y')
            check_runon_linux()
            return
        if istorsocks != 0:
            logger.info("[+] Installing TorSocks")
            os.system('apt-get install torsocks -y')
            check_runon_linux()
            return
        if isprivoxy != 0:
            logger.info("[+] Installing Privoxy")
            os.system('apt-get install privoxy -y')
            check_runon_linux()
            return
        if file_listip == False:
            logger.info("[+] Creating File listip.txt")
            open("listip.txt", "w").close()
            check_runon_linux()
            return
else:
logger.info("[*] Tor has been install")
logger.info("[*] TorSocks has been install")
logger.info("[*] Privoxy has been install")
logger.info("[*] File listip.txt has been created")
startservice_linux()
except Exception as e:
logger.error("[x_x] Something went wrong, please check your error message.\n Message - {0}".format(e))
def startservice_linux():
try:
logger.info("[*] Current IP Addresss")
currentip = os.system("wget -qO- http://ipecho.net/plain 2> /dev/null ; echo")
logger.info("[*] Start service Tor")
os.system("service tor start")
time.sleep(2)
logger.info("[*] Start service Privoxy")
os.system("service privoxy start")
time.sleep(2)
logger.info("[*] Change your SOCKES to 127.0.0.1:9050")
logger.info("[*] Set time stamp")
sec = int(input("[?] Time to auto change ip by second (default 600s):") or "600")
loop = int(input("[?] Number of loop (default 144):") or "144")
for i in range(loop):
logger.info("[*] Change New IP")
os.system("service tor restart")
time.sleep(2)
# currentip = os.system("torsocks wget -qO- http://ipecho.net/plain 2> /dev/null ; echo")
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
socket.socket = socks.socksocket
url = 'http://ipecho.net/plain'
request = urllib.request.Request(url)
request.add_header('Cache-Control','max-age=0')
response = urllib.request.urlopen(request)
newip = response.read().decode('utf-8')
            if check_exists_tor_ip(newip):
                restart_tor_service_linux()
            else:
                with open('listip.txt', 'a+') as f:
                    f.write(newip + '\n')
                logger.info("[*] Successfully - Your IP has been changed")
                logger.info("[*] New IP Address:")
                print(newip)
            time.sleep(sec)
        logger.info("[#] The loop has finished; stopping services")
stopservice_linux()
except Exception as e:
logger.error("[x_x] Something went wrong, please check your error message.\n Message - {0}".format(e))
def restart_tor_service_linux():
logger.warning("Your IP Already Exist - Request New IP")
os.system("service tor restart")
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
socket.socket = socks.socksocket
url = 'http://ipecho.net/plain'
request = urllib.request.Request(url)
request.add_header('Cache-Control','max-age=0')
response = urllib.request.urlopen(request)
newip = response.read().decode('utf-8')
    if check_exists_tor_ip(newip):
        restart_tor_service_linux()
    else:
        with open('listip.txt', 'a+') as f:
            f.write(newip + '\n')
        logger.info("[*] Successfully - Your IP has been changed")
        logger.info("[*] New IP Address:")
        print(newip)
def stopservice_linux():
try:
logger.info("[*] Stop service Tor")
os.system("service tor stop")
time.sleep(2)
logger.info("[*] Stop service Privoxy")
os.system("service privoxy stop")
os.system("clear")
except Exception as e:
logger.error("[x_x] Something went wrong, please check your error message.\n Message - {0}".format(e))
def main():
try:
detect_platform()
if (sys.platform == 'darwin'):
logger.info("[*] Darwin - MAC OS-X")
check_runon_osx()
        if sys.platform in ('linux', 'linux1', 'linux2'):
logger.info("[*] Linux - KALI LINUX - UBUNTU")
check_runon_linux()
except KeyboardInterrupt:
logger.warning("[#] KeyboardInterrupt")
if (sys.platform == 'darwin'):
stopservice_osx()
        if sys.platform in ('linux', 'linux1', 'linux2'):
stopservice_linux()
if __name__ == '__main__':
if sys.version_info < (3,0):
logger.error("[x_x] Autorip requires Python 3.x")
else:
main()
|
"""
Sponge Knowledge Base
Loading knowledge bases
"""
class Trigger1(Trigger):
def onConfigure(self):
self.withLabel("Trigger1, file3").withEvent("e1")
def onRun(self, event):
#self.logger.debug("file3: Received event {}", event)
global eventCounter
eventCounter.get(self.meta.label).incrementAndGet()
class Trigger3(Trigger):
def onConfigure(self):
self.withLabel("Trigger3, file3").withEvent("e3")
def onRun(self, event):
#self.logger.debug("file3: Received event {}", event)
global eventCounter
eventCounter.get(self.meta.label).incrementAndGet()
# Execute immediately while loading
sponge.enableAll(Trigger1, Trigger3)
def onShutdown():
sponge.logger.debug("onShutdown, file3")
|
import logging
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas_datareader import data
from pandera import Check, Column, DataFrameSchema, Index, errors
portfolio_schema = DataFrameSchema(
{
"Asset": Column(str, allow_duplicates=False),
"Share": Column(int),
"Target weight": Column(
float,
checks=[
Check(lambda s: (s >= 0.0) & (s <= 1.0)),
Check(lambda s: np.sum(s) == 1.0),
],
),
"Target Price": Column(
float, checks=[Check(lambda s: s >= 0.0)], required=False, nullable=True
),
},
index=Index(int),
strict=True,
coerce=True,
)
def validate_file(file):
try:
df_portfolio = pd.read_csv(file)
portfolio_schema(df_portfolio)
logging.info("successful file validation")
return df_portfolio
    except (errors.SchemaError, errors.SchemaErrors) as err:
        logging.error(f"file validation failed: {err}")
        raise
def prices(assets, look_back, end_date=None):
if end_date:
start_date = (end_date - timedelta(days=look_back)).strftime("%Y-%m-%d")
else:
start_date = (datetime.today() - timedelta(days=look_back)).strftime("%Y-%m-%d")
df = data.DataReader(assets, "yahoo", start=start_date, end=end_date)
df = df.stack(level=1).reset_index(level=[0, 1], drop=False).reset_index(drop=True)
logging.info(f"fetched quotes for {len(assets)} tickers from yahoo api")
logging.info(f"shape of loaded dataframe: {df.shape}")
return df
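
# A minimal usage sketch (assumptions: a local "portfolio.csv" containing the columns
# declared in portfolio_schema, network access for the Yahoo reader, and an illustrative
# 365-day look-back; these names are not defined by the module above).
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    portfolio = validate_file("portfolio.csv")               # raises on schema violations
    quotes = prices(portfolio["Asset"].tolist(), look_back=365)
    print(quotes.head())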
|
class ShuidiConstants:
"""
Default constants for shuidi
author: weiwei
date: 20210329
"""
IP = '192.168.10.10' # default ip
PORTS = {"server": 5000}
    BUFSIZE = 31001  # note: immediately overridden by the assignment below
BUFSIZE = 4096
|
# Time-stamp: <2020-01-02 15:18:16 taoliu>
"""Description: SCKMER anndata cmd
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
# ------------------------------------
# python modules
# ------------------------------------
import os
import sys
import logging
import scanpy as sc
import numpy as np
# ------------------------------------
# own python modules
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def run( args ):
"""Compile an AnnData object from a 10X h5 file
"""
# Parse options...
options = args
# end of parsing commandline options
info = options.info
warn = options.warn
debug = options.debug
error = options.error
# take options
h5_fname = options.ifile
out_fname = options.ofile
# read
adata = sc.read_10x_h5( h5_fname )
# add n_counts and n_kmers to adata.obs
adata.obs['n_counts'] = np.sum(adata.X, axis=1).A1
adata.obs['n_kmers'] = np.sum(adata.X > 0, axis=1).A1
# add n_cells to adata.var
adata.var['n_cells'] = np.sum(adata.X > 0, axis=0).A1
# save adata to h5ad
adata.write_h5ad( filename = out_fname )
return
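
# A minimal invocation sketch (assumptions: the argparse.Namespace stands in for the
# project's own options object, the logging callables replace its info/warn/debug/error
# hooks, and the file names are illustrative).
def _example_run():
    from argparse import Namespace
    args = Namespace(ifile="filtered_feature_bc_matrix.h5", ofile="out.h5ad",
                     info=logging.info, warn=logging.warning,
                     debug=logging.debug, error=logging.error)
    run(args)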
|
'''
Created on 2013-2-2
@author: Joseph
'''
from PySide.QtUiTools import QUiLoader
from PySide import QtGui
import sys
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
loader = QUiLoader()
widget = loader.load('untitled.ui')
widget.show()
sys.exit(app.exec_())
|
import matplotlib.pyplot as plt
import numpy as np
import pytest
import pdfstream.integration.tools
import pdfstream.integration.tools as tools
from pdfstream.integration.tools import integrate
def test_bg_sub_error():
with pytest.raises(ValueError):
tools.bg_sub(np.ones((2, 2)), np.zeros((3, 3)))
@pytest.mark.parametrize(
"case", [0]
)
def test_integrate(test_data, case):
if case == 0:
chi, setting = integrate(
test_data["Ni_img"], test_data["ai"], mask=np.ones_like(test_data["Ni_img"]),
integ_setting={"npt": 1000}
)
expect = np.zeros(1000)
assert chi.shape == (2, 1000)
assert np.array_equal(chi[1], expect)
@pytest.fixture
def user_mask(request, test_data):
if request.param == "ones":
return np.ones_like(test_data["Ni_img"])
if request.param == "zeros":
return np.zeros_like(test_data["Ni_img"])
return None
@pytest.mark.parametrize(
"user_mask, mask_setting",
[
(None, None),
(None, {"alpha": 2, "upper_thresh": 1000}),
("ones", None),
("zeros", None)
],
indirect=["user_mask"]
)
def test_auto_mask(test_data, user_mask, mask_setting):
mask, _ = pdfstream.integration.tools.auto_mask(test_data['Ni_img'], test_data['ai'], user_mask=user_mask,
mask_setting=mask_setting)
plt.matshow(mask)
plt.colorbar()
plt.show(block=False)
plt.close()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-22 11:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('feedjack', '0016_add_group_model'),
]
operations = [
migrations.CreateModel(
name='PostMark',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mark', models.CharField(choices=[(b'U', b'Unread'), (b'R', b'Read'), (b'T', b'To Read'), (b'I', b'Important'), (b'F', b'Favourite')], max_length=1, db_index=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feedjack.Post', db_index=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, db_index=True)),
],
),
migrations.AddField(
model_name='post',
name='marked',
field=models.ManyToManyField(blank=True, through='feedjack.PostMark', to=settings.AUTH_USER_MODEL),
),
]
|
# -*- encoding: utf-8 -*-
# @File : binary_saver.py
# @Time : 2021/07/09 19:57:12
# @Author : Ming Ding
# @Contact : dm18@mail.tsinghua.edu.cn
# here put the import lib
import os
import sys
import math
import random
import torch
from tqdm import tqdm
import numpy as np
from .base_saver import BaseSaver
from cogdata.utils.register import register
@register
class BinarySaver(BaseSaver):
"""Save data as binary files
"""
suffix = '.bin'
max_buffer_size = 1024 * 1024 * 1024 * 10
mapping = {
'int32': torch.IntTensor,
'int64': torch.LongTensor,
'float32': torch.FloatTensor,
'uint8': torch.ByteTensor,
'bool': torch.BoolTensor
}
def __init__(self, output_path, dtype='int32', **kwargs):
# TODO test speed of buffering
self.bin = open(output_path, 'wb', buffering=128*1024*1024)
self.dtype = self.mapping[dtype]
def save(self, data):
        '''Write a tensor to the binary file.

        Parameters
        ----------
        data : torch.Tensor
            Tensor to cast to ``self.dtype`` and append to the binary file.
        '''
self.bin.write(data.type(self.dtype).numpy().tobytes()) # TODO buffer
def commit(self):
'''
Commit all buffered samples.
'''
self.bin.flush()
def __del__(self):
'''
Close and Commit.
'''
self.commit()
self.bin.close()
# @classmethod
# def merge(cls, files, output_path, overwrite=False):
# '''
# Merge files into one.
# '''
# if os.path.exists(output_path):
# if overwrite:
# os.remove(output_path)
# else:
# raise FileExistsError
# ret = os.system('cat {} >> {}'.format(
# ' '.join(files), output_path
# )) # TODO: solve possible "Argument list too long"
# if ret != 0:
# raise Exception(f'cat return code {ret}')
@classmethod
def merge(cls, files, output_path, overwrite=False):
        ''' Merge files into one.

        Parameters
        ----------
        files : list of str
            Paths of the files to merge, in order.
        output_path : str
            Path of the merged output file.
        overwrite : bool
            Accepted for backward compatibility; not used by this implementation.
        '''
merge_file = open(output_path, 'wb')
for file_path in tqdm(files):
with open(file_path, 'rb') as data_file:
while True:
data = data_file.read(cls.max_buffer_size)
if not data:
break
merge_file.write(data)
merge_file.close()
# @classmethod
# def split(cls, input_path, output_dir, n):
# '''
# Split input_path into n files in output_path.
# '''
# os.makedirs(output_dir, exist_ok=True)
# prefix = os.path.join(output_dir, os.path.split(input_path)[-1]+'.part')
# ret = os.system('split -d {} -n {} {} -a 3'.format(
# input_path, n, prefix
# ))
# if ret != 0:
# raise Exception(f'split return code {ret}')
@classmethod
def split(cls, input_path, output_dir, n, **kwargs):
        ''' Split input_path into n files in output_dir.

        Parameters
        ----------
        input_path : str
            The path of the input binary file.
        output_dir : str
            The folder that receives the n output parts.
        n : int
            The number of parts to produce.
        **kwargs
            Must include ``dtype`` (numpy dtype name) and ``length_per_sample``
            (values per sample), used to compute the per-part byte size.
        '''
dtype = np.dtype(kwargs['dtype'])
sample_size = kwargs['length_per_sample'] * dtype.itemsize
merge_size = os.path.getsize(input_path)
assert merge_size % sample_size == 0
sample_num = merge_size // sample_size
split_size = (sample_num + n - 1) // n
size = split_size * sample_size # bytes per part
with open(input_path, 'rb') as merge_file:
for i in tqdm(range(n - 1)):
left_size = size
merge_trunk = open(os.path.join(
output_dir, os.path.split(input_path)[-1]+f".part{i}"), 'wb')
while left_size > cls.max_buffer_size:
merge_trunk.write(merge_file.read(cls.max_buffer_size))
left_size -= cls.max_buffer_size
merge_trunk.write(merge_file.read(left_size))
merge_trunk.close()
merge_trunk = open(os.path.join(output_dir, os.path.split(
input_path)[-1]+f".part{n - 1}"), 'wb')
while True:
data = merge_file.read(cls.max_buffer_size)
if not data:
break
merge_trunk.write(data)
merge_trunk.close()
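
# A minimal usage sketch of the saver API above (assumptions: the /tmp paths, int32 dtype
# and 4x4 tensor are illustrative only; run from within the cogdata package so the
# relative import of BaseSaver resolves).
def _binary_saver_example():
    saver = BinarySaver('/tmp/example.bin', dtype='int32')
    saver.save(torch.arange(16).reshape(4, 4))   # cast to int32 and appended to the file
    saver.commit()                               # flush buffered bytes to disk
    os.makedirs('/tmp/parts', exist_ok=True)
    # Split into 2 parts with 4 int32 values per sample, then merge the parts back together.
    BinarySaver.split('/tmp/example.bin', '/tmp/parts', 2,
                      dtype='int32', length_per_sample=4)
    BinarySaver.merge(['/tmp/parts/example.bin.part0', '/tmp/parts/example.bin.part1'],
                      '/tmp/example_merged.bin')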
|
class Observer:
_observers = []
def __init__(self):
self._observers.append(self)
self._observables = {}
def observe(self, event_name, callback):
self._observables[event_name] = callback
@property
def observers(self):
return self._observers
class IOEvent:
def __init__(self, location, text, filename, autofire=True):
self.name = "IOEvent" + location
self.text = text
self.filename = filename
if autofire: self.fire()
def fire(self):
for observer in Observer._observers:
if self.name in observer._observables:
observer._observables[self.name](self.text, self.filename)
class AlgSuccessEvent:
def __init__(self, algconfig, autofire=True):
self.name = "SuccessEvent"
self.config = algconfig
if autofire: self.fire()
def fire(self):
for observer in Observer._observers:
if self.name in observer._observables:
observer._observables[self.name](self.config)
class DualEvent:
def __init__(self, location, type, autofire=True):
self.name = "DualEvent" + location
self.type = type
if autofire: self.fire()
def fire(self):
for observer in Observer._observers:
if self.name in observer._observables:
observer._observables[self.name](self.type)
class ReloadConfigEvent:
def __init__(self, newconfig, alg_name, autofire=True):
self.name = "ReloadConfigEvent"
self.newconfig = newconfig
self.alg_name = alg_name
if autofire: self.fire()
def fire(self):
for observer in Observer._observers:
if self.name in observer._observables:
observer._observables[self.name](self.newconfig, self.alg_name)
class FormatEvent:
def __init__(self, new_format, autofire=True):
self.name = "FormatEvent"
self.format = new_format
if autofire: self.fire()
def fire(self):
for observer in Observer._observers:
if self.name in observer._observables:
observer._observables[self.name](self.format)
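
# A minimal usage sketch of the observer/event classes above (the "output" location,
# callback and file name are illustrative assumptions, not part of this module).
if __name__ == "__main__":
    watcher = Observer()
    # Register a callback keyed by the event name "IOEvent" + location.
    watcher.observe("IOEventoutput", lambda text, filename: print(f"{filename}: {text}"))
    # Constructing the event with autofire=True (the default) dispatches it immediately.
    IOEvent("output", "hello", "result.txt")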
|
#!/bin/env python
import argparse
import pathlib
import sys
from wsgiref.util import setup_testing_defaults
from wsgiref.simple_server import make_server
import falcon
from whitenoise import WhiteNoise
front_end = pathlib.Path(__file__).resolve().parents[1] / 'front_end'
talk = falcon.API()
talk = WhiteNoise(
    talk, index_file=True, autorefresh=True, root=front_end
)
def get_config():
""" Collect a config to use """
parser = argparse.ArgumentParser(description='Serve files.')
parser.add_argument(
'--port', type=int, default=7900,
help='What port should we run on?')
parser.add_argument(
'--hostname', type=str, default='0.0.0.0',
help='The IP or host address to use?')
return parser.parse_args()
def main(config):
""" Start me up! """
with make_server(config.hostname, config.port, talk) as server:
print(f"I'm online at: http://{config.hostname}:{config.port}")
server.serve_forever()
if __name__ == "__main__":
main(get_config())
|
import torch as T
from torch.utils.data import Dataset
import numpy as np
import os
import pickle as pkl
def init_pointcloud_loader(num_points):
Z = np.random.rand(num_points) + 1.
h = np.random.uniform(10., 214., size=(num_points,))
w = np.random.uniform(10., 214., size=(num_points,))
X = (w - 111.5) / 248. * -Z
Y = (h - 111.5) / 248. * Z
X = np.reshape(X, (-1, 1))
Y = np.reshape(Y, (-1, 1))
Z = np.reshape(Z, (-1, 1))
XYZ = np.concatenate((X, Y, Z), 1)
return XYZ.astype('float32')
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def collate(batch):
data = [b for b in zip(*batch)]
if len(data) == 3:
init_pc, imgs, gt_pc = data
elif len(data) == 4:
init_pc, imgs, gt_pc, metadata = data
else:
raise ValueError('Unknown data values')
init_pc = T.from_numpy(np.array(init_pc)).requires_grad_(False)
imgs = T.from_numpy(np.array(imgs)).requires_grad_(False)
gt_pc = [T.from_numpy(pc).requires_grad_(False) for pc in gt_pc]
return (init_pc, imgs, gt_pc) if len(data) == 3 else (init_pc, imgs, gt_pc, metadata)
class ShapeNet(Dataset):
def __init__(self, path, grayscale=None, type='train', n_points=2000, **kwargs):
assert type in ('train', 'valid', 'test')
self.n_points = n_points
self.grayscale = grayscale
self.file_list = os.listdir(path)
self.path = path
self.type = type if type in ('train', 'test') else 'test'
self.num_vals = kwargs.pop('num_vals', 30)
self.pkl_list = []
self.sample_weights = []
for folder in self.file_list:
file_path = os.listdir(os.path.join(path, folder, self.type))
if type == 'valid':
idx = np.random.randint(len(file_path), size=self.num_vals // len(self.file_list))
file_path = [file_path[i] for i in idx]
file_path = [os.path.join(self.path, folder, self.type, f) for f in file_path]
self.pkl_list.extend(file_path)
self.sample_weights.extend([1/len(file_path)] * len(file_path))
def __len__(self):
return len(self.pkl_list)
def __getitem__(self, idx):
pkl_path = self.pkl_list[idx]
        with open(pkl_path, 'rb') as f:
            contents = pkl.load(f, encoding='latin1')
img = rgb2gray(contents[0])[..., None] if self.grayscale else contents[0]
img = (np.transpose(img / 255.0, (2, 0, 1)) - .5) * 2
pc = np.array(contents[1], 'float32')[:, :3]
pc -= np.mean(pc, 0, keepdims=True)
return init_pointcloud_loader(self.n_points), np.array(img, 'float32'), pc
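
# A minimal loading sketch for the dataset above (assumptions: 'path/to/shapenet' is an
# illustrative root laid out as <class>/<split>/<pickle files>, and the batch size is arbitrary).
def _make_loader(root='path/to/shapenet', batch_size=4):
    from torch.utils.data import DataLoader, WeightedRandomSampler
    dataset = ShapeNet(root, grayscale=False, type='train', n_points=2000)
    # sample_weights gives every class folder the same total weight, balancing classes.
    sampler = WeightedRandomSampler(dataset.sample_weights, num_samples=len(dataset))
    return DataLoader(dataset, batch_size=batch_size, sampler=sampler, collate_fn=collate)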
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os.path
import pelican_shiori
AUTHOR = 'Siddhant Goel'
SITENAME = 'pelican-shiori'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'Europe/Berlin'
DEFAULT_LANG = 'en'
FEED_ALL_ATOM = 'feeds/all.atom.xml'
FEED_ALL_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_ATOM = 'feeds/categories/%s.atom.xml'
CATEGORY_FEED_RSS = 'feeds/categories/%s.rss.xml'
TAG_FEED_ATOM = 'feeds/tags/%s.atom.xml'
TAG_FEED_RSS = 'feeds/tags/%s.rss.xml'
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
RSS_FEED_SUMMARY_ONLY = False
FEED_MAX_ITEMS = 5
DEFAULT_PAGINATION = 10
ARTICLE_URL = 'posts/{slug}/'
ARTICLE_SAVE_AS = 'posts/{slug}/index.html'
ARTICLE_REDIRECT_FROM = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PAGE_URL = '{slug}/'
PAGE_SAVE_AS = '{slug}/index.html'
CATEGORY_URL = 'categories/{slug}/'
CATEGORY_SAVE_AS = 'categories/{slug}/index.html'
TAG_URL = 'tags/{slug}/'
TAG_SAVE_AS = 'tags/{slug}/index.html'
TAGS_SAVE_AS = 'tags/index.html'
ARCHIVES_SAVE_AS = 'posts/index.html'
YEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/index.html'
DAY_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/index.html'
PLUGIN_PATHS = ['plugins']
PLUGINS = ['assets']
THEME = pelican_shiori.path()
SHIORI_THEME = 'blue'
ASSET_CONFIG = pelican_shiori.asset_config(SHIORI_THEME)
|
from psyneulink import *
m1 = TransferMechanism(input_ports=["InputPort A", "InputPort B"])
m2 = TransferMechanism()
c = Composition()
c.add_node(m1, required_roles=NodeRole.INPUT)
c.add_node(m2, required_roles=NodeRole.INPUT)
c._analyze_graph()
lvoc = OptimizationControlMechanism(agent_rep=RegressionCFA,
state_features=[m1.input_ports[0], m1.input_ports[1], m2.input_port],
objective_mechanism=ObjectiveMechanism(
monitor=[m1, m2]),
function=GridSearch(max_iterations=1),
control_signals=[(SLOPE, m1), (SLOPE, m2)])
c.add_node(lvoc)
input_dict = {m1: [[1], [1]], m2: [1]}
c.show_graph(show_controller=True)
# c.run(inputs=input_dict)
|
from django.contrib.admin import site, ModelAdmin
from serveradmin.access_control.models import AccessControlGroup
class AccessControlGroupAdmin(ModelAdmin):
model = AccessControlGroup
filter_horizontal = ['members', 'applications']
site.register(AccessControlGroup, AccessControlGroupAdmin)
|
from direction import all_directions, DEFAULT_DIRECTION
from gamestate import GameState
from snakes.basesnake import BaseSnake
from snakes.utils import get_next_coord, get_dist, get_opposite_direction, get_directions
import random
class ClaustrophobicSnake(BaseSnake):
"""Goes in the direction that leads to a larger area"""
def get_next_move(self, game_state: GameState) -> str:
body = game_state.data['you']['body']
head = body[0]
# TODO: If we're starving - aim for nearby food
# If our head is close to the head of another longer snake, try to steer away if possible
for snake in game_state.data['board']['snakes']:
if snake['id'] == game_state.data['you']['id']:
continue
if len(snake['body']) <= len(game_state.data['you']['body']):
continue
d = get_dist(head, snake['body'][0])
if d <= 4:
e_directions = get_directions(head, snake['body'][0])
for e_direction in e_directions:
o_direction = get_opposite_direction(e_direction)
print(f'snake {snake["name"]} is too close and big, '
f'trying opposite direction, to the {o_direction}...')
next_head_coord = get_next_coord(head, o_direction)
if next_head_coord not in game_state.snake_body_cells and \
game_state.is_coord_on_board(next_head_coord):
return o_direction
# Move in the direction of the largest area
largest_area = 0
largest_area_directions = []
for a_dir in all_directions:
next_head_coord = get_next_coord(head, a_dir)
size = game_state.get_size_of_empty_cells(next_head_coord)
print(f'The area is of size {size} if I go {a_dir}...')
if size == largest_area:
largest_area_directions.append(a_dir)
elif size > largest_area:
largest_area_directions = [a_dir]
largest_area = size
if len(largest_area_directions) == 0:
return DEFAULT_DIRECTION
return random.choice(largest_area_directions)
|
#1
C = float(input("Enter a degree in Celsius:"))
F = (9 / 5) * C + 32
print("%.1f Celsius in %.1f Fahrenhrit"%(C,F))
#2
import math
radius = float(input("请输入半径:"))
length = float(input("请输入高:"))
area = radius * radius * math.pi
volume = area * length
print("面积为%.4f"%area)
print("体积为%.1f"%volume)
#3
feet = float(input("Enter a value for feet:"))
meters = feet * 0.305
print("%.1f feet is %.4f meters"%(feet,meters))
#4
M = float(input("Enter the amount of water in kilograms:"))
initial = float(input("Enter the initial temperature:"))
final = float(input("Enter the final temperature:"))
Q = M *(final - initial) * 4184
print("The energy needed is %.1f"%Q)
#5
balance,rate = eval(input("Enter balance and interest rate (e.g., 3 for 3%):"))
interest = balance * (rate / 1200)
print("The interest is %.5f"%interest)
#6
v0,v1,t = eval(input("Enter v0, v1 and t:"))
a = (v1 - v0) / t
print("The avergea acceleration is %.4f"%a)
#7
saving = float(input("Enter the monthly saving amount:"))
num = 0
while num < 6:
account = saving * (1 + 0.00417)
saving = account
saving += 100
num += 1
print("After the sixth month, the account value is %f"%account)
#8
num = int(input("Enter a number between 0 and 1000:"))
bai = num // 100
shi = (num % 100) // 10
ge = num % 10
he = bai + shi + ge
print("The sum of the digits is %d"%he)
|
from paramiko import RSAKey
from paramiko import SSHClient
from paramiko import AutoAddPolicy
import sys
if len(sys.argv) != 3:
print("ERROR! Incorrect number of arguments")
sys.exit(1)
worker_ip = sys.argv[1]
key_file = sys.argv[2]
key = RSAKey.from_private_key_file(key_file)
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
client.connect(worker_ip, timeout = 30, pkey = key)
(stdin, stdout, stderr) = client.exec_command("sudo rm -rf /mnt/huge/*")
(stdin, stdout, stderr) = client.exec_command("sudo rm -rf repository")
(stdin, stdout, stderr) = client.exec_command("sudo rm -rf stats")
(stdin, stdout, stderr) = client.exec_command("sudo rm -f worker.sh")
(stdin, stdout, stderr) = client.exec_command("sudo rm -f helper-functions.sh")
(stdin, stdout, stderr) = client.exec_command("sudo reboot")
print("Successfully sent {} to reboot".format(worker_ip))
client.close()
|
import clusto_query
class OperatorType(clusto_query.query.QueryType):
def __new__(mcs, cls, *args, **kwargs):
constructed_class = super(OperatorType, mcs).__new__(mcs, cls, *args, **kwargs)
if getattr(constructed_class, "operator_map", None) is not None and \
getattr(constructed_class, "operator", None) is not None:
if isinstance(constructed_class.operator, basestring):
constructed_class.operator_map[constructed_class.operator] = constructed_class
else:
for operator in constructed_class.operator:
constructed_class.operator_map[operator] = constructed_class
return constructed_class
class Operator(clusto_query.query.QueryObject):
__metaclass__ = OperatorType
def __init__(self, *parameters):
self.parameters = parameters
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.parameters)))
def visit_iter(self):
yield self
for param in self.parameters:
if isinstance(param, clusto_query.query.QueryObject):
for p in param.visit_iter():
yield p
else:
yield param
|
from mdpo.po2md import Po2Md
def test_disabled_entries(tmp_file):
po_content = '''#
msgid ""
msgstr ""
msgid "foo"
msgstr "translated foo"
'''
markdown_content = '''# foo
<!-- mdpo-translator Message for translator -->
<!-- mdpo-context Context -->
<!-- mdpo-disable-next-line -->
this should be in disabled_entries
'''
with tmp_file(po_content, '.po') as po_filepath:
po2md = Po2Md(po_filepath)
output = po2md.translate(markdown_content)
assert output == '# translated foo\n\nthis should be in disabled_entries\n'
assert len(po2md.disabled_entries) == 1
disabled_entry = po2md.disabled_entries[0]
assert disabled_entry.msgid == 'this should be in disabled_entries'
assert disabled_entry.msgstr == ''
assert disabled_entry.msgctxt == 'Context'
assert disabled_entry.tcomment == 'Message for translator'
|
from django.db import models
class Event(models.Model):
event_cod = models.CharField('Codigo Identificacion', max_length=200, unique=True)
event_nom = models.CharField('Nombre del Evento', max_length=200)
event_date_init = models.DateField('Fecha de Inicio', auto_now=False, auto_now_add=False, null=True, blank=True)
even_date_end = models.DateField('Fecha de Finalizacion', auto_now=False, auto_now_add=False, null=True, blank=True)
event_site = models.TextField('Lugar del Evento', max_length=500, null=True, blank=True)
event_url = models.URLField('Pagina Web', null=True, blank=True)
def __str__(self):
return self.event_cod
class Magazine(models.Model):
    magazine_code = models.CharField('Codigo de la Revista', max_length=200, unique=True)
magazine_name = models.CharField('Nombre de la Revista', max_length=200)
magazine_date = models.DateField('Fecha de Publicacion', auto_now=False, auto_now_add=False, null=True, blank=True)
magazine_arb = models.BooleanField('Registra Arbitrada?')
magazine_cenditel = models.BooleanField('Realizada en CENDITEL?')
magazine_url = models.URLField('Pagina Web', null=True, blank=True)
def __str__(self):
return self.magazine_code
class Course(models.Model):
course_code = models.CharField('Codigo del Curso', max_length=200, unique=True)
course_name = models.CharField('Nombre del Curso', max_length=200)
course_date_init = models.DateField('Fecha de Inicio', auto_now=False, auto_now_add=False, null=True, blank=True)
course_date_end = models.DateField('Fecha de Finalizacion', auto_now=False, auto_now_add=False, null=True, blank=True)
# course_enrolled
def __str__(self):
return self.course_code
class Project(models.Model):
project_code = models.CharField('Codigo del Proyecto', max_length=200, unique=True)
project_name = models.CharField('Nombre del Proyecto', max_length=200)
project_date_init = models.DateField('Fecha de Inicio', auto_now=False, auto_now_add=False, null=True, blank=True)
project_date_end = models.DateField('Fecha de Finalizacion', auto_now=False, auto_now_add=False, null=True, blank=True)
project_poa = models.BooleanField('Pertenece al POA?')
def __str__(self):
return self.project_code
class Book(models.Model):
book_code = models.CharField('Codigo del Libro', max_length=200, unique=True)
book_name = models.CharField('Nombre del Libro', max_length=200)
book_date = models.DateField('Fecha de Publicación', auto_now=False, auto_now_add=False, null=True, blank=True)
book_cenditel = models.BooleanField('Realizado en CENDITEL?')
book_url = models.URLField('Pagina Web', max_length=200, null=True, blank=True)
def __str__(self):
return self.book_code
class Participant(models.Model):
participant_code = models.CharField('Codigo del Participante', max_length=10, unique=True)
participant_document = models.CharField('Cedula de Identidad', max_length=200, unique=True)
participant_name = models.CharField('Nombre', max_length=200)
participant_lastname = models.CharField('Apellido', max_length=200)
participant_email = models.EmailField('Correo Electronico')
def __str__(self):
return self.participant_code
class Researcher(models.Model):
researcher_code = models.CharField('Codigo del Participante', max_length=10, unique=True)
researcher_document = models.CharField('Cedula de Identidad', max_length=200, unique=True)
researcher_name = models.CharField('Nombre', max_length=200)
researcher_lastname = models.CharField('Apellido', max_length=200)
#researcher_projects
reseracher_active = models.BooleanField('Se encuentra activo?')
def __str__(self):
return self.researcher_code
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.structures import Instances, Boxes
from .backbone import EfficientDetWithLoss
class EfficientDetModel(nn.Module):
"""Detectron2 model:
https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch
"""
def __init__(self, compound_coef, model_args=None):
super().__init__()
num_classes = model_args.get("num_classes", None)
pretrained = model_args.get("pretrained", False)
self.max_bbox = model_args.get("max_bbox", 30)
self.model = EfficientDetWithLoss(num_classes=num_classes,
compound_coef=compound_coef,
load_weights=pretrained)
@staticmethod
def to_numpy(v):
if isinstance(v, np.ndarray):
return v
else:
return v.detach().cpu().numpy()
def forward(self, x):
N = len(x)
imgs = torch.stack([sample['image'].float() for sample in x])
annotations = np.ones((N, self.max_bbox, 5)) * -1
for i, sample in enumerate(x):
instances = sample['instances']
boxes = self.to_numpy(instances.gt_boxes.tensor)
class_id = self.to_numpy(instances.gt_classes)
annotation = np.concatenate([boxes, class_id[:, np.newaxis]], 1)
if len(class_id) > self.max_bbox:
annotation = annotation[:self.max_bbox, :]
annotations[i, :len(class_id), :] = annotation
annotations = torch.from_numpy(annotations)
        # the original passed an undefined ``is_train``; use the nn.Module training flag instead
        return self.model(imgs, annotations, self.training)
def infer(self, x):
imgs = torch.stack([sample['image'].float() for sample in x])
rois = self.model.infer(imgs)
outs = []
for sample_input, sample_output in zip(x, rois):
instances = Instances(
(sample_input['height'], sample_input['width']))
instances.pred_boxes = Boxes(sample_output['rois'])
instances.scores = torch.tensor(sample_output['scores'])
instances.pred_classes = torch.tensor(sample_output['class_ids'])
outs.append({"instances": instances})
return outs
class EfficientDetD0(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(0, model_args)
class EfficientDetD1(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(1, model_args)
class EfficientDetD2(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(2, model_args)
class EfficientDetD3(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(3, model_args)
class EfficientDetD4(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(4, model_args)
class EfficientDetD5(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(5, model_args)
class EfficientDetD6(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(6, model_args)
class EfficientDetD7(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(7, model_args)
class EfficientDetD7X(EfficientDetModel):
def __init__(self, model_args=None):
super().__init__(8, model_args)
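
# A minimal inference sketch (assumptions: the D0 variant, num_classes=80 and the 512x512
# zero image are illustrative; whether this runs end to end depends on the
# Yet-Another-EfficientDet-Pytorch backbone imported above).
def _demo_infer():
    model = EfficientDetD0(model_args={"num_classes": 80, "pretrained": False})
    model.eval()
    dummy = {"image": torch.zeros(3, 512, 512), "height": 512, "width": 512}
    with torch.no_grad():
        return model.infer([dummy])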
|
import srt, re
from timecode import Timecode
from datetime import datetime
from coldtype.time.sequence import Sequence, Clip, ClipTrack
def srt_to_frame(fps, st):
tc = Timecode(fps,
srt.timedelta_to_srt_timestamp(st).replace(",", "."))
return tc.frame_number-86400-21600
class SRT(Sequence):
def __init__(self, path, fps, storyboard=None, workarea_track=0):
self.path = path
subs = srt.parse(path.read_text())
clips = []
for sidx, s in enumerate(subs):
clips.append(Clip(
re.sub(r"<[^>]+>", "", s.content).strip(),
srt_to_frame(fps, s.start),
srt_to_frame(fps, s.end),
idx=sidx,
track=0))
track = ClipTrack(self, clips, [])
super().__init__(track.duration(), fps, storyboard, [track])
|
print([1, 2, 3])
print([1, 2, 3] + [4, 5, 6])
print(['Hi!'] * 4)
print(3 * [1, 2, 3])
a = ["Hello", 2.00, 4, 4 + 5, 2 * 4.9, "World" * 3]
print(a)
print(a * 3)
print(a + [" world"])
print(a[0])
print(a[1])
print(a[1:3:1])
print(a[1:5:1][1:3:1])
|
from .core import SuggestionBox
__red_end_user_data_statement__ = (
"This cog stores data provided to it by command as needed for operation. "
"As this data is for suggestions to be given from a user to a community, "
"it is not reasonably considered end user data and will "
"not be deleted except as required by Discord."
)
def setup(bot):
bot.add_cog(SuggestionBox(bot))
|