# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example script for generating random input from an ONNX model and running
the model both through the DeepSparse Engine and ONNXRuntime, comparing
outputs to confirm they are the same.
In this comparison, ONNXRuntime is assumed to give the
"correct" output, as it is the industry-standard solution.
##########
Command help:
usage: check_correctness.py [-h] [-s BATCH_SIZE] onnx_filepath
Run an ONNX model, comparing outputs between the DeepSparse Engine and ONNXRuntime
positional arguments:
onnx_filepath The full filepath of the ONNX model file being run
optional arguments:
-h, --help show this help message and exit
-s BATCH_SIZE, --batch_size BATCH_SIZE
The batch size to run the analysis for
##########
Example command for checking a downloaded resnet50 model
for batch size 8:
python examples/benchmark/check_correctness.py \
~/Downloads/resnet50.onnx \
--batch_size 8
"""
import argparse
import onnxruntime
from deepsparse import compile_model, cpu
from deepsparse.utils import (
generate_random_inputs,
get_input_names,
get_output_names,
override_onnx_batch_size,
verify_outputs,
)
CORES_PER_SOCKET, AVX_TYPE, _ = cpu.cpu_details()
def parse_args():
parser = argparse.ArgumentParser(
description=(
"Run an ONNX model, comparing outputs between the DeepSparse Engine and"
" ONNXRuntime"
)
)
parser.add_argument(
"onnx_filepath",
type=str,
help="The full filepath of the ONNX model file being run",
)
parser.add_argument(
"-s",
"--batch_size",
type=int,
default=1,
help="The batch size to run the analysis for",
)
return parser.parse_args()
def main():
args = parse_args()
onnx_filepath = args.onnx_filepath
batch_size = args.batch_size
inputs = generate_random_inputs(onnx_filepath, batch_size)
input_names = get_input_names(onnx_filepath)
output_names = get_output_names(onnx_filepath)
inputs_dict = {name: value for name, value in zip(input_names, inputs)}
# ONNXRuntime inference
print("Executing model with ONNXRuntime...")
sess_options = onnxruntime.SessionOptions()
with override_onnx_batch_size(onnx_filepath, batch_size) as override_onnx_filepath:
ort_network = onnxruntime.InferenceSession(override_onnx_filepath, sess_options)
ort_outputs = ort_network.run(output_names, inputs_dict)
# DeepSparse Engine inference
print("Executing model with DeepSparse Engine...")
dse_network = compile_model(onnx_filepath, batch_size=batch_size)
dse_outputs = dse_network(inputs)
verify_outputs(dse_outputs, ort_outputs)
print("DeepSparse Engine output matches ONNXRuntime output")
if __name__ == "__main__":
main()
# -*- coding: utf-8 -*-
"""
Bundle of methods for handling images. Rather than implementing specialized
image-processing operations, the methods in this module are used for loading,
outputting and converting formats, as well as for color manipulation.
SUPPORTED FORMATS
see http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#imread
+---------------------------+--------------------------------------------------+
| Format | Extension |
+===========================+==================================================+
| Windows bitmaps | \*.bmp, \*.dib (always supported) |
+---------------------------+--------------------------------------------------+
| JPEG files | \*.jpeg, \*.jpg, \*.jpe (see the Notes section) |
+---------------------------+--------------------------------------------------+
| JPEG 2000 files | \*.jp2 (see the Notes section) |
+---------------------------+--------------------------------------------------+
| Portable Network Graphics | \*.png (see the Notes section) |
+---------------------------+--------------------------------------------------+
| Portable image format | \*.pbm, \*.pgm, \*.ppm (always supported) |
+---------------------------+--------------------------------------------------+
| Sun rasters | \*.sr, \*.ras (always supported) |
+---------------------------+--------------------------------------------------+
| TIFF files | \*.tiff, \*.tif (see the Notes section) |
+---------------------------+--------------------------------------------------+
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import range
from past.builtins import basestring
from builtins import object
from .directory import getData, mkPath, getPath, increment_if_exits
from .config import FLOAT, INT, MANAGER
import cv2
import os
import numpy as np
from .arrayops.basic import (anorm, polygonArea, im2shapeFormat, angle,
vectorsAngles, overlay, standarizePoints,
splitPoints)
from .root import glob
# from pyqtgraph import QtGui #BUG in pydev ImportError: cannot import
# name QtOpenGL
from .cache import cache, ResourceManager
try:  # MutableSequence moved to collections.abc in Python 3.3+
    from collections.abc import MutableSequence
except ImportError:  # fallback for legacy Python
    from collections import MutableSequence
from .directory import (getData, strdifference, changedir, checkFile,
getFileHandle, increment_if_exits, mkPath)
import matplotlib.axes
import matplotlib.figure
from .plotter import Plotim, limitaxis
from .serverServices import parseString, string_is_socket_address
supported_formats = ("bmp", "dib", "jpeg", "jpg", "jpe", "jp2", "png",
"pbm", "pgm", "ppm", "sr", "ras", "tiff", "tif")
def transposeIm(im):
if len(im.shape) == 2:
return im.transpose(1, 0)
else:
return im.transpose(1, 0, 2)
#from matplotlib import colors
# colors to use
green = (0, 255, 0)
red = (0, 0, 255)
white = (255, 255, 255)
orange = (51, 103, 236)
black = (0, 0, 0)
blue = (255, 0, 0)
# dictionary of colors to use
colors = {
"blue": blue,
"green": green,
"red": red,
"white": white,
"orange": orange,
"black": black}
# look for these as <numpy array>.dtype.names
bgra_dtype = np.dtype({'b': (np.uint8, 0), 'g': (
np.uint8, 1), 'r': (np.uint8, 2), 'a': (np.uint8, 3)})
def plt2bgr(image):
if isinstance(image, matplotlib.axes.SubplotBase):
image = fig2bgr(image.figure)
elif isinstance(image, matplotlib.figure.Figure):
image = fig2bgr(image)
return image
def plt2bgra(image):
if isinstance(image, matplotlib.axes.SubplotBase):
image = fig2bgra(image.figure)
elif isinstance(image, matplotlib.figure.Figure):
image = fig2bgra(image)
return image
def fig2bgr(fig):
"""
Convert a Matplotlib figure to a RGB image.
:param fig: a matplotlib figure
:return: RGB image.
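Example (a minimal sketch; the simple plot is an assumption)::
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [1, 0])
    arr = fig2bgr(fig)  # uint8 array of shape (height, width, 3)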
"""
fig.canvas.draw()
buf = np.fromstring(fig.canvas.tostring_rgb(),
dtype=np.uint8, sep='') # RGB byte buffer
return buf.reshape(fig.canvas.get_width_height()[::-1] + (3,))
def np2str(arr):
return arr.tostring()
def str2np(string, shape):
buf = np.fromstring(string, dtype=np.uint8, sep='') # get bgr
return buf.reshape(shape)
def fig2bgra(fig):
"""
Convert a Matplotlib figure to a RGBA image.
:param fig: a matplotlib figure
:return: RGBA image.
"""
# http://www.icare.univ-lille1.fr/drupal/node/1141
# http://stackoverflow.com/questions/7821518/matplotlib-save-plot-to-numpy-array
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) # get bgra
buf = buf.reshape(fig.canvas.get_width_height()[
::-1] + (4,)) # reshape to h,w,c
return np.roll(buf, 3, axis=2) # correct channels
def qi2np(qimage, dtype='array'):
"""
Convert QImage to numpy.ndarray. The dtype defaults to uint8
for QImage.Format_Indexed8 or `bgra_dtype` (i.e. a record array)
for 32bit color images. You can pass a different dtype to use, or
'array' to get a 3D uint8 array for color images.
source from: https://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/qimage2ndarray.py
"""
from pyqtgraph import QtGui
if qimage.isNull():
raise IOError("Image is Null")
result_shape = (qimage.height(), qimage.width())
temp_shape = (qimage.height(), qimage.bytesPerLine() * 8 / qimage.depth())
if qimage.format() in (QtGui.QImage.Format_ARGB32_Premultiplied,
QtGui.QImage.Format_ARGB32,
QtGui.QImage.Format_RGB32):
if dtype == 'rec':
dtype = bgra_dtype
elif dtype == 'array':
dtype = np.uint8
result_shape += (4, )
temp_shape += (4, )
elif qimage.format() == QtGui.QImage.Format_Indexed8:
dtype = np.uint8
else:
raise ValueError("qi2np only supports 32bit and 8bit images")
# FIXME: raise error if alignment does not match
buf = qimage.bits().asstring(qimage.numBytes())
result = np.frombuffer(buf, dtype).reshape(result_shape)
if result_shape != temp_shape:
result = result[:, :result_shape[1]]
if qimage.format() == QtGui.QImage.Format_RGB32 and dtype == np.uint8:
result = result[..., :3]
return result
def np2qi(array):
"""
Convert numpy array to Qt Image.
source from: https://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/qimage2ndarray.py
:param array:
:return:
"""
if np.ndim(array) == 2:
return gray2qi(array)
elif np.ndim(array) == 3:
return rgb2qi(array)
raise ValueError("can only convert 2D or 3D arrays")
def gray2qi(gray):
"""
Convert the 2D numpy array `gray` into an 8-bit QImage with a gray
colormap. The first dimension represents the vertical image axis.
ATTENTION: This QImage carries an attribute `ndimage` with a
reference to the underlying numpy array that holds the data. On
Windows, the conversion into a QPixmap does not copy the data, so
that you have to take care that the QImage does not get garbage
collected (otherwise PyQt will throw away the wrapper, effectively
freeing the underlying memory - boom!).
source from: https://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/qimage2ndarray.py
"""
from pyqtgraph import QtGui
if len(gray.shape) != 2:
raise ValueError("gray2QImage can only convert 2D arrays")
gray = np.require(gray, np.uint8, 'C')
h, w = gray.shape
result = QtGui.QImage(gray.data, w, h, QtGui.QImage.Format_Indexed8)
result.ndarray = gray # let object live to avoid garbage collection
"""
for i in xrange(256):
result.setColor(i, QtGui.QColor(i, i, i).rgb())"""
return result
def rgb2qi(rgb):
"""
Convert the 3D numpy array `rgb` into a 32-bit QImage. `rgb` must
have three dimensions with the vertical, horizontal and RGB image axes.
ATTENTION: This QImage carries an attribute `ndimage` with a
reference to the underlying numpy array that holds the data. On
Windows, the conversion into a QPixmap does not copy the data, so
that you have to take care that the QImage does not get garbage
collected (otherwise PyQt will throw away the wrapper, effectively
freeing the underlying memory - boom!).
source from: https://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/qimage2ndarray.py
"""
from pyqtgraph import QtGui
if len(rgb.shape) != 3:
raise ValueError("rgb2QImage can only convert 3D arrays")
if rgb.shape[2] not in (3, 4):
raise ValueError("rgb2QImage can expects the last dimension to contain "
"exactly three (R,G,B) or four (R,G,B,A) channels")
h, w, channels = rgb.shape
# Qt expects 32bit BGRA data for color images:
bgra = np.empty((h, w, 4), np.uint8, 'C')
bgra[:, :, 2] = rgb[:, :, 2]
bgra[:, :, 1] = rgb[:, :, 1]
bgra[:, :, 0] = rgb[:, :, 0]
# dstack, dsplit, stack
if rgb.shape[2] == 3:
bgra[..., 3].fill(255)
fmt = QtGui.QImage.Format_RGB32
else:
bgra[..., 3] = rgb[..., 3]
fmt = QtGui.QImage.Format_ARGB32
result = QtGui.QImage(bgra.data, w, h, fmt)
result.ndarray = bgra # let object live to avoid garbage collection
return result
# STABLE FUNCTIONS
def bgra2bgr(im, bgrcolor=colors["white"]):
"""
Convert a BGRA image to BGR.
:param im: BGRA image
:param bgrcolor: BGR color representing transparency. (information is lost when
converting BGRA to BGR) e.g. [200,200,200].
:return:
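Example (a minimal sketch; the input array is an assumption)::
    bgra = np.zeros((10, 10, 4), np.uint8)  # fully transparent BGRA image
    bgr = bgra2bgr(bgra, bgrcolor=(200, 200, 200))  # transparency filled with gray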
"""
# back[chanel] = bgr[chanel]*(bgr[3]/255.0) + back[chanel]*(1-bgr[3]/255.0)
temp = im.shape
im2 = np.zeros((temp[0], temp[1], 3), np.uint8)
im2[:, :, :] = bgrcolor
for c in range(0, 3): # looping over channels
im2[:, :, c] = (im[:, :, c] * (im[:, :, 3] / 255.0) + im2[:, :, c] *
(1.0 - im[:, :, 3] / 255.0))
return im2
def convertAs(fns, base=None, folder=None, name=None, ext=None,
overwrite=False, loader=None, simulate=False):
"""
Reads files and saves them as other files based on a pattern.
:param fns: file name or list of file names. It supports glob operations.
By default glob operations ignore folders.
:param base: path to place images.
:param folder: (None) folder to place images in base's path.
If True it uses the folder in which image was loaded.
If None, no folder is used.
:param name: string for formatting new name of image with the {name} tag.
Ex: if name is 'new_{name}' and image is called 'img001' then the
formatted new image's name is 'new_img001'
:param ext: (None) extension to save all images. If None uses the same extension
as the loaded image.
:param overwrite: (False) If True and the destination filename for saving already
exists then it is replaced, else a new filename is generated
with an index "{name}_{index}.{extension}"
:param loader: (None) loader for the image file to change image attributes.
If None reads the original images untouched.
:param simulate: (False) if True, no saving is performed but the status is returned
to confirm which images were adequately processed.
:return: list of statuses (0 - no error, 1 - image not loaded,
2 - image not saved, 3 - error in processing image)
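Example (an illustrative sketch; the paths and name pattern are assumptions)::
    status = convertAs("/path/to/*.jpg", base="/path/to/converted/",
                       name="new_{name}", ext="png", simulate=True)
    # dry run: returns (original, status_code, new_name) tuples without writing files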
"""
if loader is None:
loader = loadFunc(1)
if isinstance(fns, basestring):
filelist = glob(fns) # list
else: # is an iterator
filelist = []
for f in fns:
filelist.extend(glob(f))
if base is None:
base = ''
# ensures that path from base ends with separator
if base:
base = os.path.join(base, "")
replaceparts = getData(base) # from base get parts
# ensures that extension starts with point "."
if isinstance(ext, basestring) and not ext.startswith("."):
ext = "." + ext # correct extension
status = []
for file in filelist:
parts = getData(file) # file parts
# replace drive
if replaceparts[0]:
parts[0] = replaceparts[0]
# replace root
if replaceparts[1]:
if folder is True:
parts[1] = os.path.join(replaceparts[1],
os.path.split(os.path.split(parts[1])[0])[1], "")
elif isinstance(folder, basestring):
parts[1] = os.path.join(replaceparts[1], folder, "")
else:
parts[1] = replaceparts[1]
# to replace basic name
if isinstance(name, basestring):
parts[2] = name.format(name=parts[2])
if isinstance(ext, basestring):
parts[3] = ext # replace extension
newfile = "".join(parts)
if not overwrite:
newfile = increment_if_exits(newfile)
try:
im = loader(file)
# image not loaded
if im is None:
status.append((file, 1, newfile))
continue
# image successfully saved
if simulate:
status.append((file, 0, newfile))
continue
else:
mkPath("".join(parts[:2]))
if cv2.imwrite(newfile, im):
status.append((file, 0, newfile))
continue
# image not saved
status.append((file, 2, newfile))
except:
# an error in the process
status.append((file, 3, newfile))
return status
def checkLoaded(obj, fn="", raiseError=False):
"""
Simple function to determine if variable is valid.
:param obj: loaded object
:param fn: path of file
:param raiseError: if True and obj is None, raise an error
:return: None
"""
if obj is not None:
print(fn, " Loaded...")
else:
print(fn, " Could not be loaded...")
if raiseError:
raise ValueError("{} could not be loaded".format(fn))
def loadcv(path, flags=-1, shape=None):
"""
Simple function to load using opencv.
:param path: path to image.
:param flags: openCV flags:
+-------+------------------------------+--------+
| value | openCV flag | output |
+=======+==============================+========+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+------------------------------+--------+
| (-1) | cv2.CV_LOAD_IMAGE_UNCHANGED | format |
+-------+------------------------------+--------+
:param shape: shape to resize image.
:return: loaded image
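Example (a minimal sketch; the path is an assumption)::
    im = loadcv("/path/to/image.png", flags=0, shape=(100, 100))  # gray, resized to 100x100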
"""
im = cv2.imread(path, flags)
if shape:
im = cv2.resize(im, shape)
return im
def loadsfrom(path, flags=cv2.IMREAD_COLOR):
"""
Loads Image from URL or file.
:param path: filepath or url
:param flags: openCV flags:
+-------+------------------------------+--------+
| value | openCV flag | output |
+=======+==============================+========+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+------------------------------+--------+
| (-1) | cv2.CV_LOAD_IMAGE_UNCHANGED | format |
+-------+------------------------------+--------+
:return:
"""
if isinstance(path, basestring):
if path.endswith(".npy"): # reads numpy arrays
return np.lib.load(path, None)
resp = getFileHandle(path) # download the image
else:
resp = path # assume path is a file-like object ie. cStringIO or file
# nparr = np.asarray(bytearray(resp.read()), dtype=dtype) # convert it to
# a NumPy array
nparr = np.fromstring(resp.read(), dtype=np.uint8)
image = cv2.imdecode(nparr, flags=flags) # decode using OpenCV format
return image
def interpretImage(toparse, flags):
"""
Interprets to get image.
:param toparse: string to parse or array. It can interpret:
* connection to server (i.e. host:port)
* path to file (e.g. /path_to_image/image_name.ext)
* URL to image (e.g. http://domain.com/path_to_image/image_name.ext)
* image as string (e.g. numpy array converted to string)
* image itself (i.e. numpy array)
:param flags: openCV flags:
+-------+------------------------------+--------+
| value | openCV flag | output |
+=======+==============================+========+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+------------------------------+--------+
| (-1) | cv2.CV_LOAD_IMAGE_UNCHANGED | format |
+-------+------------------------------+--------+
:return: image or None if not successful
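Example (an illustrative sketch; the inputs are assumptions)::
    im = interpretImage("/path/to/image.png", 1)  # from a file path, as BGR
    im = interpretImage("http://domain.com/image.png", 0)  # from a URL, as gray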
"""
# test it is from server
if string_is_socket_address(toparse): # process request to server
toparse = parseString(toparse, 5)
# test is object itself
if type(toparse).__module__ == np.__name__: # test numpy array
if flags == 1:
return im2shapeFormat(toparse, (0, 0, 3))
if flags == 0:
return im2shapeFormat(toparse, (0, 0))
return toparse
# test image in string
try:
return cv2.imdecode(toparse, flags)
except TypeError:
# test path to file or URL
return loadsfrom(toparse, flags)
class ImFactory(object):
"""
Image factory for RRToolbox to create scripts that standardize image loading and
provide lazy loading (it can load images from disk with customized options
and/or create mapped images to load when needed) to conserve memory.
.. warning:: In development.
"""
_interpolations = {"nearest": 0, "bilinear": 1,
"bicubic": 2, "area": 3, "lanczos": 4}
_convertions = {}
def __init__(self, **kwargs):
"""
:param kwargs: keyword arguments used to set the loader attributes (see update)
:return: None
An image can be represented as a matrix of width "W" and height "H" whose elements
are called pixels; each pixel represents a color at one point of a plane (2D).
OpenCV and many other image-manipulation libraries use numpy arrays as the base
image representation (numpy is a fast and powerful library for array manipulation
and one of the main modules for scientific development in Python). A numpy matrix
with n rows and m columns has shape (n,m), which for an image is (H,W) and in a
Cartesian plane corresponds to (y,x).
If an image is W,H = 100,100 then dsize = (W,H) = (300,100) is equivalent to
fsize = (fx,fy) = (3,1).
After the image is loaded into a numpy array it has shape
(n,m) = (rows,cols) = (H,W) = im.shape
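Example (a sketch of the shape/dsize relation described above)::
    im = np.zeros((100, 300), np.uint8)  # H=100 rows, W=300 columns
    H, W = im.shape[:2]  # numpy shape is (rows, cols) = (H, W)
    dsize = (W, H)       # openCV dsize is (width, height) = (300, 100)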
"""
self.path = None # path to use to load image
self.mmap_mode = None # mapping file modes
self.mmap_path = None # path to create numpy file; None, do not create mapping file
self.w = None
self.h = None
self.fx = None
self.fy = None
self.convert = None
self.interpolation = None
self.throw = True
self.update(**kwargs)
# TODO not finished
def update(self, **kwargs):
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
else:
raise Exception("Not attribute '{}'".format(key))
def get_Func(self):
"""
gets the loading function
"""
pass
def get_code(self):
"""
get the script code
"""
pass
def get_errorFunc(self, path=None, throw=None):
def errorFunc(im):
if throw and im is None:
if checkFile(path):
if getData(path)[-1] in supported_formats:
raise Exception(
"Not enough permissions to load '{}'".format(path))
else:
raise Exception(
"Failed to load '{}'. Format not supported".format(path))
else:
raise Exception("Missing file '{}'".format(path))
return {None: errorFunc}
def get_loadFunc(self, flag=None):
def loadFunc(path):
return cv2.imread(path, flag)
return {"im": loadFunc}
def get_resizeFunc(self, dsize=None, dst=None, fx=None, fy=None, interpolation=None):
# see
# http://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#resize
fx, fy, interpolation = fx or 0, fy or 0, interpolation or 0
def resizeFunc(im):
return cv2.resize(im, dsize, dst, fx, fy, interpolation)
return {"im": resizeFunc}
def get_mapFunc(self, flag=None, RGB=None, mpath=None, mode=None,
func=None, dsize=None, dst=None, fx=None, fy=None,
interpolation=None):
def mapFunc(path):
if mpath == "*": # save mmap in working directory
drive, dirname, (filename, ext) = "", "", getData(path)[-2:]
elif mpath: # save mmap in mpath
drive, dirname, filename, ext = getData(changedir(path, mpath))
else: # save mmap in image path
drive, dirname, filename, ext = getData(path)
# THIS CREATES ONE HASHED FILE
hashed = hash("{}{}{}{}{}{}".format(
flag, RGB, dsize, fx, fy, interpolation))
savepath = "{}{}{}{}.{}.npy".format(
drive, dirname, filename, ext, hashed)
try: # load from map
# mapper(savepath,None,mode,True)[0]#
return np.lib.load(savepath, mode)
except IOError: # create object and map
im = func(path)
if im is None: # this is regardless of throw flag
raise Exception("Failed to load image to map")
np.save(savepath, im)
# mapper(savepath,im,mode,True)[0]#
return np.lib.load(savepath, mode)
return {"im": mapFunc}
def get_transposeFunc(self):
def transposeFunc(im):
if len(im.shape) == 2:
return im.transpose(1, 0)
else:
# np.ascontiguousarray?
# http://stackoverflow.com/a/27601130/5288758
return im.transpose(1, 0, 2)
return {"im": transposeFunc}
def get_convertionFunc(self, code):
def convertionFunc(im):
return cv2.cvtColor(im, code)
return {"im": convertionFunc}
def get_np2qi(self):
return {"im": np2qi}
def loadFunc(flag=0, dsize=None, dst=None, fx=None, fy=None, interpolation=None,
mmode=None, mpath=None, throw=True, keepratio=True):
"""
Creates a function that loads image array from path, url,
server, string or directly from numpy array (supports databases).
:param flag: (default: 0) 0 to read as gray, 1 to read as BGR, -1 to
read as BGRA, 2 to read as RGB, -2 to read as RGBA.
It supports openCV flags:
* cv2.CV_LOAD_IMAGE_COLOR
* cv2.CV_LOAD_IMAGE_GRAYSCALE
* cv2.CV_LOAD_IMAGE_UNCHANGED
+-------+-------------------------------+--------+
| value | openCV flag | output |
+=======+===============================+========+
| (2) | N/A | RGB |
+-------+-------------------------------+--------+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+-------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+-------------------------------+--------+
| (-1) | cv2.CV_LOAD_IMAGE_UNCHANGED | BGRA |
+-------+-------------------------------+--------+
| (-2) | N/A | RGBA |
+-------+-------------------------------+--------+
:param dsize: (None) output image size; if it equals zero, it is computed as:
dsize = Size(round(fx*src.cols), round(fy*src.rows))
If (integer,None) or (None,integer) it completes the values according
to keepratio parameter.
:param dst: (None) output image; it has the size dsize (when it is non-zero) or the
size computed from src.size(), fx, and fy; the type of dst is uint8.
:param fx: scale factor along the horizontal axis
:param fy: scale factor along the vertical axis
:param interpolation: interpolation method compliant with opencv:
+-----+-----------------+-------------------------------------------------------+
|flag | Operation | Description |
+=====+=================+=======================================================+
|(0) | INTER_NEAREST | nearest-neighbor interpolation |
+-----+-----------------+-------------------------------------------------------+
|(1) | INTER_LINEAR | bilinear interpolation (used by default) |
+-----+-----------------+-------------------------------------------------------+
|(2) | INTER_CUBIC | bicubic interpolation over 4x4 pixel neighborhood |
+-----+-----------------+-------------------------------------------------------+
|(3) | INTER_AREA | resampling using pixel area relation. |
| | | It may be a preferred method for image decimation, |
| | | as it gives moiré-free results. But when the image |
| | | is zoomed, it is similar to the INTER_NEAREST method. |
+-----+-----------------+-------------------------------------------------------+
|(4) | INTER_LANCZOS4 | Lanczos interpolation over 8x8 pixel neighborhood |
+-----+-----------------+-------------------------------------------------------+
:param mmode: (None) mmode to create mapped file. if mpath is specified loads image, converts
to mapped file and then loads mapping file with mode {None, 'r+', 'r', 'w+', 'c'}
(it is slow for big images). If None, loads mapping file to memory (useful to keep
image copy for session even if original image is deleted or modified).
:param mpath: (None) path to create mapped file.
None, do not create mapping file
"", uses path directory;
"*", uses working directory;
else, uses specified directory.
:param keepratio: True to keep image ratio when completing data from dsize,fx and fy,
False to not keep ratio.
.. note::
If mmode is None and mpath is given it creates mmap file but loads from it to memory.
It is useful to create physical copy of data to keep loading from (data can be reloaded
even if original file is moved or deleted).
:return: loader function
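Example (a minimal sketch; the path is an assumption)::
    load_gray = loadFunc(flag=0, dsize=(200, None))  # gray, width 200, keep aspect ratio
    im = load_gray("/path/to/image.png")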
"""
# create factory functions
def errorFunc(im, path):
if im is None:
if checkFile(path):
if getData(path)[-1][1:] in supported_formats:
raise Exception(
"Not enough permissions to load '{}'".format(path))
else:
raise Exception(
"Failed to load '{}'. Format not supported".format(path))
else:
raise Exception("Missing file '{}'".format(path))
RGB = False
if abs(flag) == 2: # determine if needs to do conversion from BGR to RGB
flag = flag // 2 # get normal flag
RGB = True
def loadfunc(path):
im = interpretImage(path, flag) # load func
if throw:
errorFunc(im, path) # if not loaded throw error
if flag < 0 and im.shape[2] != 4:
if RGB:
return cv2.cvtColor(im, cv2.COLOR_BGR2RGBA)
return cv2.cvtColor(im, cv2.COLOR_BGR2BGRA)
if RGB:
if flag < 0:
return cv2.cvtColor(im, cv2.COLOR_BGRA2RGBA)
else:
return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
return im
if dsize or dst or fx or fy:
if fx is None and fy is None:
fx = fy = 1.0
elif keepratio:
if fx is not None and fy is None:
fy = fx
elif fy is not None and fx is None:
fx = fy
else:
if fx is not None and fy is None:
fy = 1
elif fy is not None and fx is None:
fx = 1
interpolation = interpolation or 0
if keepratio:
def calc_dsize(shape, dsize=None):
"""
calculates dsize to keep the image's ratio.
:param shape: image shape
:param dsize: dsize tuple with a None value
:return: calculated dsize
"""
x, y = dsize
sy, sx = shape[:2]
if x is not None and y is None:
dsize = x, int(sy * (x * 1.0 / sx))
elif y is not None and x is None:
dsize = int(sx * (y * 1.0 / sy)), y
else:
dsize = sx, sy
return dsize
else:
def calc_dsize(shape, dsize=None):
"""
calculates dsize without keeping the image's ratio.
:param shape: image shape
:param dsize: dsize tuple with a None value
:return: calculated dsize
"""
dsize = list(dsize)
ndsize = shape[:2][::-1]
for i, val in enumerate(dsize):
if val is None:
dsize[i] = ndsize[i]
dsize = tuple(dsize)
return dsize
if dsize is not None and None in dsize:
def resizefunc(path):
img = loadfunc(path)
return cv2.resize(img, calc_dsize(img.shape, dsize), dst, fx, fy, interpolation)
else:
def resizefunc(path):
return cv2.resize(loadfunc(path), dsize, dst, fx, fy, interpolation)
func = resizefunc
else:
func = loadfunc
if mmode or mpath is not None: # if there is a mmode, or mpath is string
def mapfunc(path):
if mpath == "*": # save mmap in working directory
drive, dirname, (filename, ext) = "", "", getData(path)[-2:]
elif mpath: # save mmap in mpath
drive, dirname, filename, ext = getData(changedir(path, mpath))
else: # save mmap in image path
drive, dirname, filename, ext = getData(path)
"""
# THIS CREATES A FOLDER TO MEMOIZE
def dummy(path,flag=0,dsize=0,fx=0,fy=0,interpolation=0):
return func(path)
savepath = "{}{}{}{}".format(drive,dirname,filename,ext)
return memoize(savepath,mmap_mode=mmode)(dummy)(path,flag,dsize,fx,fy,interpolation)"""
"""
# THIS CREATES TWO FILES BUT ONLY MEMOIZE ONE STATE OF IM ARGUMENTS
savepath = "{}{}{}{}.{}".format(drive,dirname,filename,ext,"memoized")
comps = ("flag","dsize","fx","fy","interpolation")
try: # load from map
data = mapper(savepath, mmode=mmode)[0]
bad = [i for i in comps if data[i] != locals()[i]]
if bad:
raise IOError
else:
return data["im"]
except IOError: # create object and map
im = func(path)
if im is None: # this is regardless of throw flag
raise Exception("Failed to load image to map")
data = dict(im=im,flag=flag,dsize=dsize,fx=fx,fy=fy,interpolation=interpolation)
return mapper(savepath,data,mmode)[0]["im"]"""
"""
# THIS CREATES TWO HASHED FILES
hashed = hash("{}{}{}{}{}{}".format(flag,RGB,dsize,fx,fy,interpolation))
savepath = "{}{}{}{}.{}".format(drive,dirname,filename,ext,hashed)
try: # load from map
im = mapper(savepath, mmode=mmode)[0]
return im
except IOError: # create object and map
im = func(path)
if im is None: # this is regardless of throw flag
raise Exception("Failed to load image to map")
return mapper(savepath,im,mmode)[0]"""
# THIS CREATES ONE HASHED FILE
hashed = hash("{}{}{}{}{}{}".format(
flag, RGB, dsize, fx, fy, interpolation))
savepath = "{}{}{}{}.{}.npy".format(
drive, dirname, filename, ext, hashed)
try: # load from map
# mapper(savepath,None,mmode,True)[0]#
return np.lib.load(savepath, mmode)
except IOError: # create object and map
im = func(path)
if im is None: # this is regardless of throw flag
raise Exception("Failed to load image to map")
np.save(savepath, im)
# mapper(savepath,im,mmode,True)[0]#
return np.lib.load(savepath, mmode)
func = mapfunc # factory function
def loader(path):
try:
return func(path)
except Exception as e:
if throw:
raise
return loader # factory function
class ImLoader(object):
"""
Class to load image array from path, url,
server, string or directly from numpy array (supports databases).
:param flag: (default: 0) 0 to read as gray, 1 to read as BGR, -1 to
read as BGRA, 2 to read as RGB, -2 to read as RGBA.
It supports openCV flags:
* cv2.CV_LOAD_IMAGE_COLOR
* cv2.CV_LOAD_IMAGE_GRAYSCALE
* cv2.CV_LOAD_IMAGE_UNCHANGED
+-------+-------------------------------+--------+
| value | openCV flag | output |
+=======+===============================+========+
| (2) | N/A | RGB |
+-------+-------------------------------+--------+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+-------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+-------------------------------+--------+
| (-1) | cv2.CV_LOAD_IMAGE_UNCHANGED | BGRA |
+-------+-------------------------------+--------+
| (-2) | N/A | RGBA |
+-------+-------------------------------+--------+
:param dsize: (None) output image size; if it equals zero, it is computed as:
dsize = Size(round(fx*src.cols), round(fy*src.rows))
:param dst: (None) output image; it has the size dsize (when it is non-zero) or the
size computed from src.size(), fx, and fy; the type of dst is uint8.
:param fx: scale factor along the horizontal axis; when it equals 0, it is computed as
(double)dsize.width/src.cols
:param fy: scale factor along the vertical axis; when it equals 0, it is computed as
(double)dsize.height/src.rows
:param interpolation: interpolation method compliant with opencv:
+-----+-----------------+-------------------------------------------------------+
|flag | Operation | Description |
+=====+=================+=======================================================+
|(0) | INTER_NEAREST | nearest-neighbor interpolation |
+-----+-----------------+-------------------------------------------------------+
|(1) | INTER_LINEAR | bilinear interpolation (used by default) |
+-----+-----------------+-------------------------------------------------------+
|(2) | INTER_CUBIC | bicubic interpolation over 4x4 pixel neighborhood |
+-----+-----------------+-------------------------------------------------------+
|(3) | INTER_AREA | resampling using pixel area relation. |
| | | It may be a preferred method for image decimation, |
| | | as it gives moiré-free results. But when the image |
| | | is zoomed, it is similar to the INTER_NEAREST method. |
+-----+-----------------+-------------------------------------------------------+
|(4) | INTER_LANCZOS4 | Lanczos interpolation over 8x8 pixel neighborhood |
+-----+-----------------+-------------------------------------------------------+
:param mmode: (None) mmode to create mapped file. if mpath is specified loads image, converts
to mapped file and then loads mapping file with mode {None, 'r+', 'r', 'w+', 'c'}
(it is slow for big images). If None, loads mapping file to memory (useful to keep
image copy for session even if original image is deleted or modified).
:param mpath: (None) path to create mapped file.
None, do not create mapping file
"", uses path directory;
"*", uses working directory;
else, uses specified directory.
.. note:: If mmode is None and mpath is given it creates mmap file but loads from it to memory.
It is useful to create physical copy of data to keep loading from (data can be reloaded
even if original file is moved or deleted).
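Example (a minimal sketch; the path is an assumption)::
    loader = ImLoader("/path/to/image.png", flag=1, dsize=(300, 300))
    im = loader()  # loads with the stored configuration
    gray = loader.temp(flag=0)  # one-off load with a customized flag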
"""
def __init__(self, path, flag=0, dsize=None, dst=None, fx=None, fy=None,
interpolation=None, mmode=None, mpath=None, throw=True):
self.path = path
self._flag = flag
self._dsize = dsize
self._dst = dst
self._fx = fx
self._fy = fy
self._interpolation = interpolation
self._mmode = mmode
self._mpath = mpath
self._throw = throw
self.load = loadFunc(flag, dsize, dst, fx, fy,
interpolation, mmode, mpath, throw)
#i = loadFunc.__doc__.find(":param")
#__init__.__doc__ = loadFunc.__doc__[:i] + __init__.__doc__ + loadFunc.__doc__[i:] # builds documentation dynamically
# del i # job done, delete
def __call__(self):
return self.load(self.path)
def getConfiguration(self, **kwargs):
"""
Get Custom configuration from default configuration.
:param kwargs: keys to customize default configuration.
If no key is provided default configuration is returned.
:return: dictionary of configuration
"""
temp = {"flag": self._flag, "dsize": self._dsize, "dst": self._dst, "fx": self._fx, "fy": self._fy,
"interpolation": self._interpolation, "mmode": self._mmode, "mpath": self._mpath, "throw": self._throw}
if kwargs:
temp.update(kwargs)
return temp
def temp(self, **kwargs):
"""
Loads using a temporary loader created from customized and default parameters.
:param kwargs: keys to customize default configuration.
:return: loaded image.
"""
if len(kwargs) == 1 and "path" in kwargs:
return self.load(kwargs["path"])
path = kwargs.get("path", self.path) # get path
# build a new loader and load path
return loadFunc(**self.getConfiguration(**kwargs))(path)
class PathLoader(MutableSequence):
"""
Class to standardize loading images from list of paths and offer lazy evaluations.
:param fns: list of paths
:param loader: path loader (loadcv,loadsfrom, or function from loadFunc)
Also see:: :func:`loadcv`, :func:`loadsfrom`, :func:`loadFunc`
Example::
fns = ["/path to/image 1.ext","/path to/image 2.ext"]
imgs = PathLoader(fns)
print(imgs[0]) # loads image in path 0
print(imgs[1]) # loads image in path 1
"""
def __init__(self, fns=None, loader=None):
# create factory functions
self._fns = fns or []
self._loader = loader or loadFunc()
def __call__(self):
"""
if called returns the list of paths
"""
return self._fns
def __getitem__(self, key):
return self._loader(self._fns[key])
def __setitem__(self, key, value):
self._fns[key] = value
def __delitem__(self, key):
del self._fns[key]
def __len__(self):
return len(self._fns)
def insert(self, index, value):
self._fns.insert(index, value)
class LoaderDict(ResourceManager):
"""
Class to standardize loading objects and manage memory efficiently.
:param loader: default loader for objects (e.g. load from file or create instance object)
:param maxMemory: (None) maximum memory, in the specified unit, used to trigger optimization (it does
not mean that memory never surpasses maxMemory).
:param margin: (0.8) margin from maxMemory to trigger optimization.
It is in percentage of maxMemory ranging from 0 (0%) to maximum 1 (100%).
So optimal memory is inside range: maxMemory*margin < Memory < maxMemory
:param unit: (MB) maxMemory unit, it can be GB (Gigabytes), MB (Megabytes), B (bytes)
:param all: if True used memory is from all alive references,
if False used memory is only from keptAlive references.
:param config: (Not Implemented)
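Example (an illustrative sketch; keys and paths are assumptions)::
    loaders = LoaderDict(loader=loadFunc(1), maxMemory=512, unit="MB")
    loaders.register("im1", "/path/to/image1.png")  # lazily loaded with the default loader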
"""
def __init__(self, loader=None, maxMemory=None, margin=0.8,
unit="MB", all=True, config=None):
super(LoaderDict, self).__init__(maxMemory, margin, unit, all)
# create factory functions
#if config is None: from config import MANAGER as config
#self._config = config
self._default_loader = loader or loadFunc()
def register(self, key, path=None, method=None):
if method is not None:
def func(): return method(func.path)
else:
def func(): return self._default_loader(func.path)
func.path = path
super(LoaderDict, self).register(key=key, method=func)
def try_loads(fns, func=cv2.imread, paths=None, debug=False, addpath=False):
"""
Try to load images from paths.
:param fns: list of file names
:param func: loader function
:param paths: paths to try. By default it tries the working directory and the test path
:param debug: True to show debug messages
:param addpath: if True, also return the path from which the image was loaded
:return: image else None
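Example (an illustrative sketch; the file names are assumptions)::
    im = try_loads(["im1.png", "im1.jpg"], debug=True)  # first image found in the default paths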
"""
default = ("", str(MANAGER["TESTPATH"]))
if paths is None:
paths = []
if isinstance(paths, basestring):
paths = [paths]
paths = list(paths)
for i in default:
if i not in paths:
paths.append(i)
for fn in fns:
for path in paths:
try:
if path[-1] not in ("/", "\\"): # ensures path
path += "/"
except:
pass
path += fn
im = func(path) # foreground
if im is not None:
if debug:
print(path, " Loaded...")
if addpath:
return im, path
return im
def hist_match(source, template, alpha=None):
"""
Adjust the pixel values of an image to match those of a template image.
:param source: image to transform colors to template
:param template: template image used as the histogram reference
:param alpha: (currently unused)
:return: transformed source
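Example (an illustrative sketch; the random arrays are assumptions)::
    src = np.random.randint(0, 256, (64, 64)).astype(np.uint8)
    ref = np.random.randint(0, 256, (64, 64)).astype(np.uint8)
    matched = hist_match(src, ref)  # src remapped to follow ref's histogram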
"""
# theory https://en.wikipedia.org/wiki/Histogram_matching
# explanation http://dsp.stackexchange.com/a/16175 and
# http://fourier.eng.hmc.edu/e161/lectures/contrast_transform/node3.html
# based on implementation http://stackoverflow.com/a/33047048/5288758
# see http://www.mathworks.com/help/images/ref/imhistmatch.html
if len(source.shape) > 2:
matched = np.zeros_like(source)
for i in range(3):
matched[:, :, i] = hist_match(source[:, :, i], template[:, :, i])
return matched
else:
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape) # reconstruct image
############################# GETCOORS ###################################
# http://docs.opencv.org/master/db/d5b/tutorial_py_mouse_handling.html
# http://docs.opencv.org/modules/highgui/doc/qt_new_functions.html
class ImCoors(object):
"""
Image's coordinates class.
Example::
a = ImCoors(np.array([(116, 161), (295, 96), (122, 336), (291, 286)]))
print(a.__dict__)
print("mean depends on min and max: ", a.mean)
print(a.__dict__)
print("after mean, max has already been calculated: ", a.max)
a.data = np.array([(116, 161), (295, 96)])
print(a.__dict__)
print("mean and all its dependencies are processed again: ", a.mean)
"""
def __init__(self, pts, dtype=FLOAT, deg=False):
"""
Initializes ImCoors.
:param pts: list of points
:param dtype: return data as dtype. Default is config.FLOAT
:param deg: (False) if True angles are computed in degrees, else in radians
"""
self._pts = pts # supports bigger numbers
self._dtype = dtype
self._deg = deg
@property
def pts(self):
return self._pts
@pts.setter
def pts(self, value):
getattr(self, "__dict__").clear()
self._pts = value
@pts.deleter
def pts(self):
raise Exception("Cannot delete attribute")
@property
def dtype(self):
return self._dtype
@dtype.setter
def dtype(self, value):
getattr(self, "__dict__").clear()
self._dtype = value
@dtype.deleter
def dtype(self):
raise Exception("Cannot delete attribute")
# DATA METHODS
def __len__(self):
return len(self._pts)
@cache
def max(self):
"""
Maximum in each axis.
:return: x_max, y_max
"""
#self.max_x, self.max_y = np.max(self.data,0)
return tuple(np.max(self._pts, 0))
@cache
def min(self):
"""
Minimum in each axis.
:return: x_min, y_min
"""
#self.min_x, self.min_y = np.min(self.data,0)
return tuple(np.min(self._pts, 0))
@cache
def rectbox(self):
"""
Rectangular box enclosing points (origin and end point or rectangle).
:return: (x0,y0),(x,y)
"""
return (self.min, self.max)
@cache
def boundingRect(self):
"""
Rectangular box dimensions enclosing points.
example::
P = np.ones((400,400))
a = ImCoors(np.array([(116, 161), (295, 96), (122, 336), (291, 286)]))
x0,y0,w,h = a.boundingRect
P[y0:y0+h,x0:x0+w] = 0
:return: x0,y0,w,h
"""
return cv2.boundingRect(self._pts)
@cache
def minAreaRect(self):
return cv2.minAreaRect(self._pts)
@cache
def rotatedBox(self):
"""
Rotated rectangular box enclosing points.
:return: 4 points.
"""
try: # opencv 2
return self._dtype(cv2.cv.BoxPoints(self.minAreaRect))
except AttributeError: # opencv 3
return self._dtype(cv2.boxPoints(self.minAreaRect))
@cache
def boxCenter(self):
"""
Center of the box enclosing the points (midpoint between min and max in each axis).
:return: x_center, y_center
"""
#self.mean_x = (self.max_x+self.min_x)/2
#self.mean_y = (self.max_y+self.min_y)/2
xX, xY = self.max
nX, nY = self.min
return tuple(self._dtype((xX + nX, xY + nY)) / 2)
@cache
def mean(self):
"""
Center or mean.
:return: x,y
"""
# http://hyperphysics.phy-astr.gsu.edu/hbase/cm.html
# https://www.grc.nasa.gov/www/K-12/airplane/cg.html
#self.center_x, self.center_y = np.sum(self.data,axis=0)/len(self.data)
# map(int,np.mean(self.data,0))
# tuple(np.sum(self.data,axis=0)/len(self.data))
return tuple(np.mean(self._pts, 0, dtype=self._dtype))
center = mean
@cache
def area(self):
"""
Area of points.
:return: area number
"""
return polygonArea(self._pts)
@cache
def rectangularArea(self):
"""
Area of rectangle enclosing points aligned with x,y axes.
:return: area number
"""
# method 1, it is not precise in rotation
(x0, y0), (x, y) = self.rectbox
return self.dtype(np.abs((x - x0) * (y - y0)))
@cache
def rotatedRectangularArea(self):
"""
Area of Rotated rectangle enclosing points.
:return: area number
"""
return polygonArea(self.rotatedBox)
@cache
def rectangularity(self):
"""
Ratio that represents a perfect square aligned with the x,y axes.
:return: ratio from 1 to 0, 1 representing a perfect rectangle.
"""
# method 1
#cx,cy = self.center
# bcx,bcy=self.boxCenter
# return (cx)/bcx,(cy)/bcy # x_ratio, y_ratio
# method 2
return self.dtype(self.area / self.rectangularArea)
@cache
def rotatedRectangularity(self):
"""
Ratio that represents a perfect rotated square fitting the points.
:return: ratio from 1 to 0, 1 representing a perfect rotated rectangle.
"""
# prevent unstable values
if self.area < 1:
area = 0 # needed to prevent false values
else:
area = self.area
return self.dtype(area / self.rotatedRectangularArea)
@cache
def regularity(self):
"""
Ratio of forms with similar measurements and angles.
e.g. squares and rectangles have right angles so they are regular.
For a regular object this must give 1.
:return:
"""
# TODO this algorithm is still imperfect
# get pi value in radian or degrees
pi = angle((1, 0), (0, 1), deg=self._deg)
av = self.vertexesAngles
return pi * (len(av)) / np.sum(av) # pi*number_angles/sum_angles
@cache
def relativeVectors(self):
"""
Form vectors from points.
:return: array of vectors [V0, ... , (V[n] = x[n+1]-x[n],y[n+1]-y[n])].
"""
pts = np.array(self._pts)
# adds last vector from last and first point.
pts = np.append(pts, [pts[0]], axis=0)
return np.stack([np.diff(pts[:, 0]), np.diff(pts[:, 1])], 1)
@cache
def vertexesAngles(self):
"""
Relative angle of vectors formed by vertexes.
i.e. angle between vectors "v01" formed by points "p0-p1" and "v12"
formed by points "p1-p2" where "p1" is seen as a vertex (where vectors cross).
:return: angles.
"""
vs = self.relativeVectors # get all vectors from points.
# add last vector to first position
vs = np.roll(np.append(vs, [vs[-1]], axis=0), 2)
return np.array([angle(vs[i - 1], vs[i], deg=self._deg)
for i in range(1, len(vs))], self._dtype) # calculate angles
@cache
def pointsAngles(self):
"""
Angle of vectors formed by points in Cartesian plane with respect to x axis.
i.e. angle between vector "v01" (formed by points "p0-p1") and vector unity in axis x.
:return: angles.
"""
vs = self.relativeVectors # get all vectors from points.
return vectorsAngles(pts=vs, dtype=self._dtype, deg=self._deg)
@cache
def vectorsAngles(self):
"""
Angle of vectors in Cartesian plane with respect to x axis.
i.e. angle between vector "v0" (formed by point "p0" and the origin) and vector unity in axis x.
:return: angles.
"""
return np.array([angle((1, 0), i, deg=self._deg) for i in self._pts], self._dtype) # calculate angles with respect to x axis
def drawcoorpoints(vis, points, col_out=black, col_in=red, radius=2):
"""
Function to draw points.
:param vis: image array.
:param points: list of points.
:param col_out: outer color of point.
:param col_in: inner color of point.
:param radius: radius of drawn points.
:return:
"""
points = np.array(points, INT)
radius_in = radius - 1
for x, y in points:
cv2.circle(vis, (x, y), radius, col_out, -1)
cv2.circle(vis, (x, y), radius_in, col_in, -1)
return vis
def myline(img, pt1, pt2, color, thickness=None):
"""
Function to draw a line between two points (experimental).
:param img:
:param pt1:
:param pt2:
:param color:
:param thickness:
:return:
"""
# y=m*x+b
x1, y1 = np.array(pt1, dtype=FLOAT)
x2, y2 = np.array(pt2, dtype=FLOAT)
m = (y2 - y1) / (x2 - x1)
xmin, xmax = np.sort([x1, x2])
xvect = np.arange(xmin, xmax + 1).astype('int')
yvect = np.array(xvect * m + int(y1 - x1 * m), dtype=int)
for i in zip(yvect, xvect):
# img.itemset(i,color)
img[i] = color
def drawcooraxes(vis, points, col_out=black, col_in=green, radius=2):
"""
Function to draw axes instead of points.
:param vis: image array.
:param points: list of points.
:param col_out: outer color of point.
:param col_in: inner color of point.
:param radius: radius of drawn points.
:return:
"""
points = np.array(points, INT)
thickness = radius - 1
h1, w1 = vis.shape[:2] # obtaining image dimensions
for i in points:
h1pt1 = (0, i[1])
h1pt2 = (w1, i[1])
w2pt1 = (i[0], 0)
w2pt2 = (i[0], h1)
cv2.line(vis, h1pt1, h1pt2, col_in, thickness)
cv2.line(vis, w2pt1, w2pt2, col_in, thickness)
vis = drawcoorpoints(vis, points, col_out, col_in, radius)
return vis
def drawcoorpolyline(vis, points, col_out=black, col_in=red, radius=2):
"""
Function to draw interaction with points to obtain a polyline.
:param vis: image array.
:param points: list of points.
:param col_out: outer color of point.
:param col_in: inner color of point.
:param radius: radius of drawn points.
:return:
"""
thickness = radius - 1
if len(points) > 1:
points = np.array(points, INT)
cv2.polylines(vis, [points], False, col_in, thickness)
"""
for i in range(len(points)-1):
pt1 = (points[i][0], points[i][1])
pt2 = (points[i+1][0], points[i+1][1])
cv2.line(vis, pt1, pt2, col_in, thickness)"""
else:
vis = drawcoorpoints(vis, points, col_out, col_in, radius)
return vis
def drawcoorarea(vis, points, col_out=black, col_in=red, radius=2):
"""
Function to draw interaction with points to obtain area.
:param vis: image array.
:param points: list of points.
:param col_out: outer color of point.
:param col_in: inner color of point.
:param radius: radius of drawn points.
:return:
"""
if len(points) > 2:
mask = np.zeros(vis.shape[:2])
cv2.drawContours(mask, [np.array(points, np.int32)], 0, 1, -1)
vis = overlay(vis, np.array(
[(0,) * len(col_in), col_in])[mask.astype(int)], alpha=mask * 0.5)
vis = drawcoorpoints(vis, points, col_out, col_in, radius)
else:
vis = drawcoorpoints(vis, points, col_out, col_in, radius)
return vis
def drawcoorpolyArrow(vis, points, col_out=black, col_in=red, radius=2):
"""
Function to draw interaction with vectors to obtain a polygonal path drawn with arrows.
:param vis: image array.
:param points: list of points.
:param col_out: outer color of point.
:param col_in: inner color of point.
:param radius: radius of drawn points.
:return:
"""
points = np.array(points, INT)
thickness = radius - 1
if len(points) > 1:
for i in range(len(points) - 1):
pt1 = (points[i][0], points[i][1])
pt2 = (points[i + 1][0], points[i + 1][1])
cv2.arrowedLine(vis, pt1, pt2, col_in, thickness)
vis = drawcoorpoints(vis, points, col_out, col_in,
radius) # draw points
else:
vis = drawcoorpoints(vis, points, col_out, col_in, radius)
return vis
def drawcoorperspective(vis, points, col_out=black, col_in=red, radius=2):
"""
Function to draw interaction with points to obtain perspective.
:param vis: image array.
:param points: list of points.
:param col_out: outer color of point.
:param col_in: inner color of point.
:param radius: radius of drawn points.
:return:
"""
points = np.array(points, INT)
thickness = radius - 1
if len(points) > 1 and len(points) < 5:
for i in range(len(points) - 1):
if i % 2:
for j in range(i + 1, min(len(points), i + 3)):
if j % 2:
# print "i=",i," j=",j
pt1 = (points[i][0], points[i][1])
pt2 = (points[j][0], points[j][1])
cv2.arrowedLine(vis, pt1, pt2, col_in, thickness)
else:
for j in range(i + 1, min(len(points), i + 3)):
# print "i=",i," j=",j
pt1 = (points[i][0], points[i][1])
pt2 = (points[j][0], points[j][1])
cv2.arrowedLine(vis, pt1, pt2, col_in, thickness)
vis = drawcoorpoints(vis, points, col_out, col_in,
radius) # draw points
else:
vis = drawcoorpoints(vis, points, col_out, col_in, radius)
return vis
def limitaxispoints(c, maxc, minc=0):
"""
Limit a point in axis.
:param c: list of points.
:param maxc: maximum value of point.
:param minc: minimum value of point.
:return: return limited points.
"""
x = np.zeros(len(c), dtype=int)
for i, j in enumerate(c):
x[i] = limitaxis(j, maxc, minc)
return tuple(x)
class GetCoors(Plotim):
"""
Create window to select points from image.
:param im: image to get points.
:param win: window name.
:param updatefunc: function to draw interaction with points.
(e.g. drawcoorpoints, drawcoorperspective, etc.).
:param prox: proximity to identify point.
:param radius: radius of drawn points.
:param unique: If True no point can be repeated,
else selected points can be repeated.
:param col_out: outer color of point.
:param col_in: inner color of point.
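Example (an illustrative sketch; `im` is assumed to be an already loaded image)::
    coors = getcoors(im, "select points", updatefunc=drawcoorpolyline)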
"""
def __init__(self, im, win="get coordinates", updatefunc=drawcoorpoints,
unique=True, col_out=black, col_in=red):
# functions
super(GetCoors, self).__init__(win, im)
# assign functions
self.updatefunc = updatefunc
self.userupdatefunc = updatefunc
self.prox = 8 # proximity to keypoint
# initialize user variables
self.radius = 3
self.unique = unique
self.col_out = col_out
self.col_in = col_in
# initialize control variables
self.interpolation = cv2.INTER_AREA
self._coors = []
self.rcoors = [] # rendered coordinates
self.coorlen = 0
self.showstats = False
self.mapdata2 = [None, None, None]
self.data2 = np.zeros((self.rH, self.rW, 1), dtype=np.uint8)
self.drawcooraxes = drawcooraxes
self.drawcoorperspective = drawcoorperspective
self.drawcoorpolyline = drawcoorpolyline
self.drawcoorpoints = drawcoorpoints
self.controlText[0].extend([" No. coordinates: {self.coorlen}. "])
self.cmdeval.update({"points": "self.updatefunc = self.drawcoorpoints",
"polyline": "self.updatefunc = self.drawcoorpolyline",
"perspective": "self.updatefunc = self.drawcoorperspective",
"axes": "self.updatefunc = self.drawcooraxes",
"user": "self.updatefunc = self.userupdatefunc",
"end": ["self.updatecoors()", "self.mousefunc()"]})
self.cmdlist.extend(["unique", "showstats", "user", "points",
"polyline", "perspective", "axes"])
# self.coors # return coordinates
@property
def coors(self):
return self._coors
@coors.setter
def coors(self, value):
self._coors = value
self.updatecoors()
@coors.deleter
def coors(self):
self._coors = []
self.updatecoors()
def drawstats(self, points, col_out=black, col_in=green, radius=2):
"""
:param self:
:param points:
:param col_out:
:param col_in:
:param radius:
:return:
"""
vis = self.rimg
p = ImCoors(points)
self.data2 = np.zeros((vis.shape[0], vis.shape[1], 1), dtype=np.uint8)
drawcooraxes(vis, [p.boxCenter], col_out, col_in, radius)
drawcooraxes(self.data2, [p.boxCenter], 1, 1, self.prox)
drawcooraxes(vis, [p.mean], col_in, col_out, radius)
drawcooraxes(self.data2, [p.mean], 2, 2, self.prox)
p1 = ImCoors(self.coors)
self.mapdata2 = [None, "center at " +
str(p1.boxCenter), "mean at " + str(p1.mean)]
def updatecoors(self):
"""
:param self:
:return:
"""
self.coorlen = len(self.coors)
self.updaterenderer()
if self.coors != []:
self.rcoors = self.coors[:]
newc = self.coors[:]
for j, i in enumerate(self.coors):
newc[j] = self.real2render(i[0], i[1])
self.rcoors[j] = limitaxispoints(newc[j], 10000, -10000)
if self.showstats:
self.drawstats(newc, radius=self.radius)
self.rimg = self.updatefunc(
self.rimg, newc, self.col_out, self.col_in, self.radius)
else:
self.data2[:] = 0
self.coordinateText = [["xy({self.x},{self.y})"]]
def mousefunc(self):
"""
:param self:
:return:
"""
# control system
controlled = self.builtincontrol()
drawed = False
# get nearest coordinate to pointer
isnear = False
if self.coors != [] and self.rx is not None and self.ry is not None:
# vals = anorm(np.int32(self.coors) - (self.x, self.y)) # relative
# to real coordinates
# relative to rendered coordinates
vals = anorm(np.int32(self.rcoors) - (self.rx, self.ry))
near_point = np.logical_and(vals < self.prox, vals == np.min(vals))
if np.any(near_point): # if near point
idx = np.where(near_point)[0] # get index
isnear = True
val = self.coors[idx[0]]
count = self.coors.count(val)
self.coordinateText = [
["point " + str(idx[0]) + " at " + str(val) + "x" + str(count)]]
else:
self.coordinateText = [["xy({self.x},{self.y})"]]
# coordinate system
if not controlled and bool(self.flags):
if self.event == cv2.EVENT_RBUTTONDBLCLK: # right-button double click: DELETE ALL COORDINATES
self.coors = []
self.img = self.data.copy()
drawed = True
elif isnear and self.event == cv2.EVENT_RBUTTONDOWN: # if right button DELETE NEAREST COORDINATE
self.coors.pop(idx[0]) # if more than one point delete first
drawed = True
elif self.event == cv2.EVENT_LBUTTONDOWN: # if left button ADD COORDINATE
val = (self.x, self.y)
if not self.coors.count(val) or not self.unique:
self.coors.append(val)
drawed = True
# update renderer
if (controlled or drawed):
self.updatecoors()
if self.y is not None and self.x is not None:
if self.showstats:
data = self.mapdata2[self.data2[self.ry, self.rx]]
if not isnear and data is not None:
self.coordinateText = [[data]]
self.builtinplot(self.data[int(self.y), int(self.x)])
def getcoors(im, win="get coordinates", updatefunc=drawcoorpoints, coors=None,
prox=8, radius=3, unique=True, col_out=black, col_in=red):
self = GetCoors(im, win, updatefunc, unique=unique,
col_out=col_out, col_in=col_in)
self.radius = radius
self.prox = prox
if coors is not None:
self.coors = standarizePoints(coors, aslist=True)
self.show(clean=False)
coors = self.coors
self.clean()
return coors
def separe(values, sep, axis=0):
"""
Separate values from separator or threshold.
:param values: list of values
:param sep: separator value
:param axis: axis in each value
:return: lists of greater values, list of lesser values
"""
greater, lesser = [], []
for i in values:
if i[axis] > sep:
greater.append(i)
else:
lesser.append(i)
return greater, lesser
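# Illustrative example of separe (axis=1 compares the y component; the values
# shown are worked out by hand, not taken from any particular image):
# separe([(1, 5), (3, 1)], sep=2, axis=1) -> ([(1, 5)], [(3, 1)])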
def getrectcoors(*data):
"""
Get ordered points.
:param data: list of points
:return: [Top_left,Top_right,Bottom_left,Bottom_right]
"""
#[Top_left,Top_right,Bottom_left,Bottom_right]
# img, win = "get pixel coordinates", updatefunc = drawpoint
if len(data) == 1: # points
points = data[0]
else: # img, win
points = getcoors(*data)
p = ImCoors(points)
min_x, min_y = p.min
max_x, max_y = p.max
Top_left = (min_x, min_y)
Top_right = (max_x, min_y)
Bottom_left = (min_x, max_y)
Bottom_right = (max_x, max_y)
return [Top_left, Top_right, Bottom_left, Bottom_right]
def quadrants(points):
"""
Separate points respect to center of gravity point.
:param points: list of points
:return: [[Top_left],[Top_right],[Bottom_left],[Bottom_right]]
"""
# group points on 4 quadrants
# [Top_left,Top_right,Bottom_left,Bottom_right]
p = ImCoors(points) # points data x,y -> (width,height)
mean_x, mean_y = p.mean
Bottom, Top = separe(points, mean_y, axis=1)
Top_right, Top_left = separe(Top, mean_x, axis=0)
Bottom_right, Bottom_left = separe(Bottom, mean_x, axis=0)
return [Top_left, Top_right, Bottom_left, Bottom_right]
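# Illustrative example of quadrants for the four corners of a 2x2 box (the mean
# point is (1, 1); y grows downwards as in image coordinates):
# quadrants([(0, 0), (2, 0), (0, 2), (2, 2)])
# -> [[(0, 0)], [(2, 0)], [(0, 2)], [(2, 2)]]   # TL, TR, BL, BR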
def getgeometrycoors(*data):
"""
Get filled object coordinates. (function in progress)
"""
#[Top_left,Top_right,Bottom_left,Bottom_right]
# img, win = "get pixel coordinates", updatefunc = drawpoint
if len(data) == 1: # points
points = data[0]
else: # img, win
points = getcoors(*data)
return points
def random_color(channels=1, min=0, max=256):
"""
Random color.
:param channels: number of channels
:param min: min color in any channel
:param max: max color in any channel
:return: random color
"""
return [np.random.randint(min, max) for i in range(channels)]
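# Illustrative calls of random_color (outputs are random; the values shown are
# just one possibility):
# random_color()            -> e.g. [137]          # one value in [0, 255]
# random_color(channels=3)  -> e.g. [12, 240, 3]   # a 3-channel color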
class Image(object):
"""
Structure to load and save images
"""
def __init__(self, name=None, ext=None, path=None, shape=None, verbosity=False):
self._loader = loadFunc(-1, dsize=None, throw=False)
self._shape = None
        self.shape = shape  # shape is (rows, cols), i.e. dsize reversed
self.ext = ext
self.name = name
self.path = path
self._RGB = None
self._RGBA = None
self._gray = None
self._BGRA = None
self._BGR = None
self.overwrite = False
self.verbosity = verbosity
self.log_saved = None
self.log_loaded = None
self.last_loaded = None
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, value):
if value != self._shape:
if value is not None:
# invert, for value is shape and we need dsize
value = value[1], value[0]
self._loader = loadFunc(-1, dsize=value, throw=False)
self._shape = value
@shape.deleter
def shape(self):
del self._shape
@property
def ext(self):
if self._ext is None:
return ""
return self._ext
@ext.setter
def ext(self, value):
        try:
            if not value.startswith("."):  # ensure a leading dot
                value = "." + value
        except AttributeError:  # value is None or not a string
            pass
self._ext = value
@ext.deleter
def ext(self):
del self._ext
@property
def path(self):
if self._path is None:
return ""
return self._path
@path.setter
def path(self, value):
        try:
            if value[-1] not in ("/", "\\"):  # ensure a trailing separator
                value += "/"
        except (TypeError, IndexError):  # value is None or empty
            pass
self._path = value
@path.deleter
def path(self):
del self._path
@property
def BGRA(self):
if self._BGRA is None:
self.load()
return self._BGRA
@BGRA.setter
def BGRA(self, value):
self._BGRA = value
@BGRA.deleter
def BGRA(self):
self._BGRA = None
@property
def BGR(self):
if self._BGR is None:
self.load()
return self._BGR
@BGR.setter
def BGR(self, value):
self._BGR = value
@BGR.deleter
def BGR(self):
self._BGR = None
@property
def RGB(self):
if self._RGB is None:
self._RGB = cv2.cvtColor(self.BGR, cv2.COLOR_BGR2RGB)
return self._RGB
@RGB.setter
def RGB(self, value):
self._RGB = value
@RGB.deleter
def RGB(self):
self._RGB = None
@property
def RGBA(self):
if self._RGBA is None:
self._RGBA = cv2.cvtColor(self.BGRA, cv2.COLOR_BGRA2RGBA)
return self._RGBA
@RGBA.setter
def RGBA(self, value):
self._RGBA = value
@RGBA.deleter
def RGBA(self):
self._RGBA = None
@property
def gray(self):
if self._gray is None:
self._gray = cv2.cvtColor(self.BGR, cv2.COLOR_BGR2GRAY)
return self._gray
@gray.setter
def gray(self, value):
self._gray = value
@gray.deleter
def gray(self):
self._gray = None
def save(self, name=None, image=None, overwrite=None):
"""
save restored image in path.
:param name: filename, string to format or path to save image.
if path is not a string it would be replaced with the string
"{path}restored_{name}{ext}" to format with the formatting
"{path}", "{name}" and "{ext}" from the baseImage variable.
:param image: (self.BGRA)
:param overwrite: If True and the destine filename for saving already
exists then it is replaced, else a new filename is generated
with an index "{filename}_{index}.{extension}"
:return: saved path, status (True for success and False for fail)
"""
if name is None:
name = self.name
if name is None:
raise Exception("name parameter needed")
if image is None:
image = self.BGRA
if overwrite is None:
overwrite = self.overwrite
bbase, bpath, bname = getData(self.path)
bext = self.ext
# format path if user has specified so
data = getData(name.format(path="".join((bbase, bpath)),
name=bname, ext=bext))
# complete any data lacking in path
for i, (n, b) in enumerate(zip(data, (bbase, bpath, bname, bext))):
if not n:
data[i] = b
# joint parts to get string
fn = "".join(data)
mkPath(getPath(fn))
if not overwrite:
fn = increment_if_exits(fn)
if cv2.imwrite(fn, image):
if self.verbosity:
print("Saved: {}".format(fn))
if self.log_saved is not None:
self.log_saved.append(fn)
return fn, True
else:
if self.verbosity:
print("{} could not be saved".format(fn))
return fn, False
def load(self, name=None, path=None, shape=None):
if name is None:
name = self.name
if path is None:
path = self.path
if path is None:
path = ""
if shape is not None:
self.shape = shape
data = try_loads([name, name + self.ext], paths=path,
func=self._loader, addpath=True)
if data is None:
raise Exception("Image not Loaded")
img, last_loaded = data
if self.log_loaded is not None:
self.log_loaded.append(last_loaded)
if self.verbosity:
print("loaded: {}".format(last_loaded))
self.last_loaded = last_loaded
self._RGB = None
self._RGBA = None
self._gray = None
if img.shape[2] == 3:
self.BGR = img
self.BGRA = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
else:
self.BGRA = img
self.BGR = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
return self
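# Illustrative usage of the Image helper above (the file name and path are
# hypothetical):
# im = Image(name="photo", ext="jpg", path="/tmp/images", verbosity=True)
# im.load()                                      # fills the BGR/BGRA caches
# gray = im.gray                                 # lazy BGR -> GRAY conversion
# im.save("{path}gray_{name}{ext}", image=gray)  # uses the documented format keys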
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 09:02:18 2019
@author: Andrea
"""
def add_frac(zaeler1, nenner1, zaeler2, nenner2):
"""Dieses Programm addiert 2 Brüche miteinander und kürzt sie mit ihrem größten gemeinsamen Teiler (ggt)"""
    # initialize variables
    zaeler = 0
    nennerg = 0
    ggt = 1
    # check that the inputs are integers
if not(isinstance(zaeler1,int) and isinstance(nenner1, int) and isinstance(zaeler2, int) and isinstance(nenner2, int)):
print("Gib bitte nur ganze Zahlen ein")
return 0
    # compute the new denominator and the new numerator
else:
nennerg=nenner1*nenner2
zaeler1neu=zaeler1*nenner2
zaeler2neu=zaeler2*nenner1
zaeler=zaeler1neu+zaeler2neu
if zaeler<nennerg:
maximum = zaeler
else:
maximum=nennerg
        # find the gcd by iterating downwards (including maximum itself)
        for ggt in range(maximum, 0, -1):
            if (zaeler % ggt) == 0 and (nennerg % ggt) == 0:
                break
        return zaeler // ggt, nennerg // ggt
def is_prime(ganzzahl):
    if ganzzahl < 2:  # 0 and 1 are not prime
        return 0
    for i in range(2, ganzzahl):
        result = ganzzahl / i
        if result.is_integer():  # not a prime
            return 0
    return 1
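# Illustrative checks (assuming the integer-division version of add_frac above):
# add_frac(1, 2, 1, 3) -> (5, 6)   i.e. 1/2 + 1/3 = 5/6
# add_frac(1, 2, 1, 2) -> (1, 1)   i.e. 1/2 + 1/2 = 1
# is_prime(7) -> 1, is_prime(8) -> 0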
|
#
# Demonstrates that the super-class implementation of an overridden method
# can be called in the same way as with normal objects.
#
from Foundation import *
N = 1
class MyObject (NSObject):
def init(self):
global N
if N == 1:
print "Calling super.init"
N = 0
# Call super-class implementation.
super(MyObject, self).init()
else:
print "Cyclic call detected"
x = MyObject.alloc().init()
|
import jsonobject
import jsl
def jsl_field_to_jsonobject_property(prop: jsl.BaseField) -> jsonobject.JsonProperty:
if isinstance(prop, jsl.DateTimeField):
return jsonobject.DateTimeProperty(name=prop.name, required=prop.required)
if isinstance(prop, jsl.StringField):
return jsonobject.StringProperty(name=prop.name, required=prop.required)
if isinstance(prop, jsl.IntField):
return jsonobject.IntegerProperty(name=prop.name, required=prop.required)
if isinstance(prop, jsl.DictField):
return jsonobject.DictProperty(name=prop.name, required=prop.required)
if isinstance(prop, jsl.NumberField):
return jsonobject.FloatProperty(name=prop.name, required=prop.required)
if isinstance(prop, jsl.BooleanField):
return jsonobject.BooleanProperty(name=prop.name, required=prop.required)
if isinstance(prop, jsl.DocumentField):
if prop.document_cls:
subtype = jsl2jsonobject(prop.document_cls)
return jsonobject.DictProperty(
name=prop.name, item_type=subtype, required=prop.required
)
return jsonobject.DictProperty(name=prop.name, required=prop.required)
if isinstance(prop, jsl.ArrayField):
if prop.items:
if isinstance(prop.items, jsl.DocumentField):
subtype = jsl2jsonobject(prop.items.document_cls)
elif isinstance(prop.items, jsl.BaseField):
subtype = jsl_field_to_jsonobject_property(prop.items)
else:
raise KeyError(prop.items)
return jsonobject.ListProperty(item_type=subtype, required=prop.required)
return jsonobject.ListProperty(name=prop.name, required=prop.required)
raise KeyError(prop)
def jsl2jsonobject(schema):
# output jsonobject schema from jsl schema
attrs = {}
for attr, prop in schema._fields.items():
prop.name = attr
attrs[attr] = jsl_field_to_jsonobject_property(prop)
Schema = type("Schema", (jsonobject.JsonObject,), attrs)
return Schema
convert = jsl2jsonobject
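# Illustrative usage sketch (the document class below is made up for the example):
# class Person(jsl.Document):
#     name = jsl.StringField(required=True)
#     age = jsl.IntField()
# PersonSchema = jsl2jsonobject(Person)
# person = PersonSchema(name="Ada", age=36)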
|
import argparse
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-train', type=str, default='./data/original/train.csv',
help='train data path')
parser.add_argument(
'--data-test', type=str, default='./data/original/test.csv',
help='test data path')
parser.add_argument(
'--seed', type=int, default=42,
help='seed')
args = parser.parse_args()
df_train = pd.read_csv(args.data_train, header=None)
df_train = df_train[[0, 1]]
df_train = df_train.rename(columns={0: 'label', 1: 'text'})
df_test = pd.read_csv(args.data_test, header=None)
df_test = df_test.rename(columns={0: 'label', 1: 'text'})
df = pd.concat([df_train, df_test])
df = df.sample(frac=1, random_state=args.seed)
df.to_csv('./data/all.csv', index=False, header=True)
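# Illustrative invocation (the script name is an assumption):
#   python prepare_data.py --data-train ./data/original/train.csv \
#       --data-test ./data/original/test.csv --seed 42
# The script concatenates train and test, shuffles the rows with the given seed
# and writes the result to ./data/all.csv with a header row.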
|
from mlagents.tf_utils.tf import tf as tf # noqa
from mlagents.tf_utils.tf import set_warnings_enabled # noqa
|
# coding=utf-8
from os import path
from constantes import *
import pickle
from random import choice
from indexer import Indexer
class Zone:
def __init__(self, id_: str, creatures_id: list, level_range: tuple):
        self.id = id_  # this is the id of a subCarte (a sub-map)
self.creatures_id = creatures_id
self.level_range = level_range
def get_id(self):
return self.id
def get_new_adversaire(self, indexer: Indexer):
id_ = choice(self.creatures_id)
type_ = indexer.get_type_of(id_)
return id_, type_, self.level_range
class ZonesManager:
def __init__(self, indexer: Indexer):
self.indexer = indexer
self.path = path.join("..", "saves", "zones" + EXTENSION)
self.zones = []
@staticmethod
def add_new_zone_to_path(zone: Zone):
path_ = path.join("..", "saves", "zones" + EXTENSION)
if path.exists(path_):
with open(path_, 'rb') as read_zones:
sv = pickle.Unpickler(read_zones).load()
sv.append(zone)
            with open(path_, 'wb') as write_zones:
                pickle.Pickler(write_zones).dump(sv)
else:
with open(path_, 'wb') as add_new:
pickle.Pickler(add_new).dump([zone])
def get_new_adversary(self, with_zoneid: int):
for zone in self.zones:
if zone.get_id() == with_zoneid:
return zone.get_new_adversaire(self.indexer)
return ZONE_ADV_ERROR
def load(self):
if path.exists(self.path):
with open(self.path, 'rb') as zones_rb:
self.zones = pickle.Unpickler(zones_rb).load()
def save(self):
with open(self.path, 'wb') as save_zones:
pickle.Pickler(save_zones).dump(self.zones)
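# Illustrative usage sketch (the Indexer construction, ids and level range are
# assumptions for the example):
# indexer = Indexer()
# manager = ZonesManager(indexer)
# ZonesManager.add_new_zone_to_path(Zone("forest", ["slime", "bat"], (1, 5)))
# manager.load()
# adversary = manager.get_new_adversary(with_zoneid="forest")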
|
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.core.paginator import Paginator
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, Http404, FileResponse
from django.conf import settings
import datetime
from . import models
import os
import all.models as all_model
import student.models as student_model
import teacher.models as teacher_model
# Create your views here.
# Competition list view
def com_list(request):
context = {}
    # Users who are not logged in or have not completed their personal info cannot sign up
try:
is_login = request.session['is_login']
except KeyError:
context['have_login'] = "赶紧登录啦 :("
else:
user_num = request.session['user_number']
user_info = get_object_or_404(all_model.user_login_info, account=user_num)
if user_info.have_alter == '0':
context['have_alter'] = "客官还没确认个人信息啦 :( 赶紧滚去修改"
com_basic_info = models.com_basic_info.objects.all()
context['com_list'] = com_basic_info
return render(request, 'competition/com_list.html', context)
# Competition detail view
def com_detail(request):
context = {}
    # Users who are not logged in or have not completed their personal info cannot sign up
try:
is_login = request.session['is_login']
except KeyError:
return redirect("/competition/com_list/")
else:
user_num = request.session['user_number']
user_info = get_object_or_404(all_model.user_login_info, account=user_num)
if user_info.have_alter == '0':
return redirect("/competition/com_list/")
if request.method == 'GET':
id = request.GET.get('id')
# print(id)
com_info = get_object_or_404(models.com_basic_info, com_id=id)
com_publish = get_object_or_404(models.com_publish_info, com_id=com_info)
        # Insert the competition announcement
context['inform'] = str("[通知]竞赛通知")
        # Insert the publication info
context['com_publish'] = com_publish
context['com_info'] = com_info
return render(request, 'competition/com_detail.html', context)
# Download a competition attachment
def com_attach_download(request):
com_id = request.GET.get('id')
com_info = get_object_or_404(models.com_basic_info, com_id=com_id)
com_publish = get_object_or_404(models.com_publish_info, com_id=com_info)
    # Return the file as a download
filename = str(com_publish.com_attachment)
file_path = settings.MEDIA_ROOT + filename
ext = os.path.basename(file_path).split('.')[-1].lower()
if ext not in ['py', 'db', 'sqlite3']:
response = FileResponse(open(file_path, 'rb'))
response['content_type'] = "application/octet-stream"
response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)
return response
else:
raise Http404
# Competition sign-up, step one
def com_apply_first(request):
context = {}
if request.method == 'GET':
id = request.GET.get('id')
        # Get the competition group info
group_list = models.com_sort_info.objects.filter(com_id=id)
        # Get the info required for sign-up
info_list = get_object_or_404(models.com_need_info, com_id=id)
        # Get the competition info
com_info = get_object_or_404(models.com_basic_info, com_id=id)
context['com_info'] = com_info
context['info_list'] = info_list
context['group_list'] = group_list
context['tea_num'] = range(1, info_list.tea_num + 1)
num = com_info.num_stu
context['stu_num'] = range(1, num + 1)
return render(request, 'competition/apply/com_apply_first.html', context)
if request.method == "POST":
id = request.GET.get('id')
com_info = get_object_or_404(models.com_basic_info, com_id=id)
info_list = get_object_or_404(models.com_need_info, com_id=id)
group_list = models.com_sort_info.objects.filter(com_id=id)
        # number of students per team
        # whether the team must be full (1: required, 0: not required)
        # whether members may be repeated (same convention as above)
        # the product name must not be empty
        # plus group info and remarks
        # team name
num = com_info.num_stu
flag_full = com_info.need_full
flag_same = com_info.same_stu
flag_proname = info_list.product_name
flag_teanum = com_info.num_teach
flag_group = info_list.com_group
flag_else = info_list.else_info
flag_groupname = info_list.group_name
        # Get the student numbers entered on the page
stu_list = []
for i in range(1, num + 1):
name = str("stu_num" + str(i))
temp = request.POST.get(name)
temp = temp.strip()
if temp != None and temp != "":
stu_list.append(temp)
        # Get the student info
stu_info_list = []
for stu in stu_list:
try:
name = student_model.stu_basic_info.objects.get(stu_number=stu)
except ObjectDoesNotExist:
                # Go back to the first page
context['message'] = '无法搜索到学号对应学生信息,请确认学号无误'
context['com_info'] = com_info
context['info_list'] = info_list
context['group_list'] = group_list
context['tea_num'] = range(1, info_list.tea_num + 1)
num = com_info.num_stu
context['stu_num'] = range(1, num + 1)
return render(request, 'competition/apply/com_apply_first.html', context)
else:
stu_info_list.append(name)
        # Check the conditions; if they are not met, go back to the first page
        # Check for duplicate registration
flag = 1
if flag_same == 0:
for stu in stu_info_list:
com_list = student_model.com_stu_info.objects.filter(stu_id=stu)
for com in com_list:
if com.com_id == com_info:
flag = 0
break
elif flag_same == 1:
num = 1
for stu in stu_info_list:
com_list = student_model.com_stu_info.objects.filter(stu_id=stu)
if num == 1:
for com in com_list:
if com.com_id == com_info:
flag = 0
break
else:
for com in com_list:
if com.is_leader == 1:
flag = 0
break
num = num + 1
if flag == 0:
            # Go back to the first page
context['message'] = '参赛成员不符合规定哦 :('
context['com_info'] = com_info
context['info_list'] = info_list
context['group_list'] = group_list
context['tea_num'] = range(1, info_list.tea_num + 1)
num = com_info.num_stu
context['stu_num'] = range(1, num + 1)
return render(request, 'competition/apply/com_apply_first.html', context)
        # Check whether the team is full
student_num = com_info.num_stu
len_stu = len(stu_info_list)
if flag_full == 1:
if len_stu != student_num:
                # Go back to the first page
context['message'] = '队伍人数不足 :('
context['com_info'] = com_info
context['info_list'] = info_list
context['group_list'] = group_list
context['tea_num'] = range(1, info_list.tea_num + 1)
num = com_info.num_stu
context['stu_num'] = range(1, num + 1)
return render(request, 'competition/apply/com_apply_first.html', context)
        # Check for duplicate student numbers
list1 = stu_info_list
list2 = list(set(list1))
if len(list1) != len(list2):
            # Go back to the first page
context['message'] = '有重复人员的哦 :('
context['com_info'] = com_info
context['info_list'] = info_list
context['group_list'] = group_list
context['tea_num'] = range(1, info_list.tea_num + 1)
num = com_info.num_stu
context['stu_num'] = range(1, num + 1)
return render(request, 'competition/apply/com_apply_first.html', context)
        # Check whether the product name is empty
if flag_proname == 1:
prodect_name = request.POST.get('product_name')
if prodect_name == "":
context['message'] = "作品名称没有填哦 X D "
context['com_info'] = com_info
context['info_list'] = info_list
context['group_list'] = group_list
context['tea_num'] = range(1, info_list.tea_num + 1)
num = com_info.num_stu
context['stu_num'] = range(1, num + 1)
return render(request, 'competition/apply/com_apply_first.html', context)
prodect_name = prodect_name.strip()
context['product_name'] = prodect_name
        # Check whether the team name is empty
if flag_groupname == 1:
group_name = request.POST.get('group_name')
if group_name == "":
context['message'] = "小组名称没有填哦 X D "
context['com_info'] = com_info
context['info_list'] = info_list
context['group_list'] = group_list
context['tea_num'] = range(1, info_list.tea_num + 1)
num = com_info.num_stu
context['stu_num'] = range(1, num + 1)
return render(request, 'competition/apply/com_apply_first.html', context)
group_name = group_name.strip()
context['group_name'] = group_name
        # Validate the supervising teachers
teach_list = []
if flag_teanum:
for i in range(1, flag_teanum + 1):
name = str('tea_name' + str(i))
temp = request.POST.get(name)
temp = temp.strip()
teach = teacher_model.teach_basic_info.objects.filter(tea_name=temp)
if len(teach) == 0:
                    # Go back to the first page
context['message'] = '无法搜索到对应指导教师信息,请确认姓名无误'
context['com_info'] = com_info
context['info_list'] = info_list
context['group_list'] = group_list
context['tea_num'] = range(1, info_list.tea_num + 1)
num = com_info.num_stu
context['stu_num'] = range(1, num + 1)
return render(request, 'competition/apply/com_apply_first.html', context)
else:
                    # each entry in the teacher list is itself a queryset (a list)
teach_list.append(teach)
teach_list = zip(teach_list, range(1, info_list.tea_num + 1))
context['teach_list'] = teach_list
        # Validate the group info
if flag_group == 1:
group = request.POST.get("group")
group_list = models.com_sort_info.objects.filter(com_id=id, sort_name=group)
context['group'] = group_list[0]
        # Remarks
if flag_else == 1:
else_info = request.POST.get("else_info")
else_info = else_info.strip()
context['else_info'] = else_info
        # Go to the confirmation page
context['stu_list'] = stu_info_list
context['info_list'] = info_list
return render(request, 'competition/apply/com_apply_second.html', context)
return render(request, 'competition/apply/com_apply_first.html', context)
# Competition sign-up, step two
def com_apply_second(request):
context = {}
if request.method == 'POST':
id = request.GET.get('id')
com_info = get_object_or_404(models.com_basic_info, com_id=id)
info_list = get_object_or_404(models.com_need_info, com_id=id)
group_list = models.com_sort_info.objects.filter(com_id=id)
num = com_info.num_stu
flag_full = com_info.need_full
flag_same = com_info.same_stu
flag_proname = info_list.product_name
flag_teanum = com_info.num_teach
flag_group = info_list.com_group
flag_else = info_list.else_info
flag_groupname = info_list.group_name
        # optimization
stu_list = []
for i in range(1, num + 1):
stu_number = request.POST.get('stu_num' + str(i))
if stu_number != None and stu_number != "":
stu_list.append(stu_number)
len_stu = len(stu_list)
stu_info_list = []
for stu in stu_list:
name = student_model.stu_basic_info.objects.get(stu_number=stu)
stu_info_list.append(name)
group_name = request.POST.get('group_name')
group = request.POST.get('group')
product_name = request.POST.get('product_name')
else_info = request.POST.get('else_info')
        # Save the info in parts: competition team info and team member info
        # Save the competition team info
com_group = models.com_group_basic_info()
# id
com_group.com_id = get_object_or_404(models.com_basic_info, com_id=id)
        # team name
if flag_groupname == 1:
com_group.group_name = group_name
        # team size
com_group.group_num = len_stu
        # competition group
if flag_group == 1:
group_list = models.com_sort_info.objects.filter(com_id=id, sort_name=group)
com_group.com_group = group_list[0]
        # product name
if flag_proname == 1:
com_group.product_name = product_name
        # remarks
if flag_else == 1:
com_group.else_info = else_info
com_group.save()
group_id = com_group.group_id
        # Save the team member info
number = 1
for i in stu_info_list:
stu = student_model.com_stu_info()
stu.com_id = get_object_or_404(models.com_basic_info, com_id=id)
stu.group_id = get_object_or_404(models.com_group_basic_info, group_id=group_id)
stu.stu_id = i
if number == 1:
stu.is_leader = 1
number += 1
stu.save()
teach_list = []
if flag_teanum:
for i in range(1, flag_teanum + 1):
name = str('tea_name' + str(i))
temp = request.POST.get(name)
if temp != "" and temp != None:
teach = teacher_model.teach_basic_info.objects.get(tea_number=temp)
teach_list.append(teach)
for i in teach_list:
teach = teacher_model.com_teach_info()
teach.com_id = get_object_or_404(models.com_basic_info, com_id=id)
teach.group_id = get_object_or_404(models.com_group_basic_info, group_id=group_id)
teach.teach_id = i
teach.save()
return redirect('/student/personal_center_stu/?tag=2')
return redirect('/student/personal_center_stu/?tag=2')
|
import math
import random
from ..algorithm_common import AlgorithmCommon as AC
from ..algorithm_common import IAlgorithm
class Tabu(IAlgorithm):
def __init__(self,
individual_max,
epsilon=0.1,
tabu_list_size=100,
tabu_range_rate=0.1,
):
self.individual_max = individual_max
self.epsilon = epsilon
self.tabu_list_size = tabu_list_size
self.tabu_range_rate = tabu_range_rate
def init(self, problem):
self.problem = problem
self.count = 0
self.tabu_range = (problem.MAX_VAL - problem.MIN_VAL) * self.tabu_range_rate
self.tabu_list = []
self.best_individual = problem.create()
self.individuals = [self.best_individual]
def getMaxElement(self):
return self.best_individual
def getElements(self):
return self.individuals
def step(self):
        # Base individual (the best individual from the previous step)
individual = self.individuals[-1]
        # Generate neighbours until enough individuals have been collected
next_individuals = []
for _ in range(self.individual_max*99): # for safety
if len(next_individuals) >= self.individual_max:
break
            # Generate a neighbour
pos = individual.getArray()
            ri = random.randint(0, len(pos)-1)  # always change at least one component
            trans = []  # for the tabu list
for i in range(len(pos)):
if i == ri or random.random() < self.epsilon:
                    # change the component to a random value
val = self.problem.randomVal()
                    trans.append((i, pos[i]-val))  # record the change
pos[i] = val
            # skip moves that are on the tabu list
if self._isInTabuList(trans):
continue
self.count += 1
o = self.problem.create(pos)
next_individuals.append((o, trans))
        # If no neighbours were generated, create a brand-new individual instead
if len(next_individuals) == 0:
o = self.problem.create()
if self.best_individual.getScore() < o.getScore():
self.best_individual = o
self.individuals = [o]
return
# sort
next_individuals.sort(key=lambda x: x[0].getScore())
        # Keep them for the next step
self.individuals = [x[0] for x in next_individuals]
        # Best individual of this step
step_best = next_individuals[-1][0]
if self.best_individual.getScore() < step_best.getScore():
self.best_individual = step_best
        # Add the move to the tabu list
step_best_trans = next_individuals[-1][1]
self.tabu_list.append(step_best_trans)
if len(self.tabu_list) > self.tabu_list_size:
self.tabu_list.pop(0)
def _isInTabuList(self, trans):
for tabu in self.tabu_list:
            # a different number of changed components means a different move
if len(tabu) != len(trans):
continue
f = True
for i in range(len(trans)):
                # a different target index means a different move
if tabu[i][0] != trans[i][0]:
f = False
break
                # values within the tabu range count as a match
tabu_val = tabu[i][1]
val = trans[i][1]
if not(tabu_val - self.tabu_range < val and val < tabu_val + self.tabu_range):
f = False
break
            # a matching tabu entry exists
if f:
return True
return False
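# Illustrative usage sketch (the `problem` object and its create()/randomVal()
# interface come from algorithm_common and are assumed here):
# tabu = Tabu(individual_max=20, epsilon=0.1, tabu_list_size=100)
# tabu.init(problem)
# for _ in range(200):
#     tabu.step()
# best = tabu.getMaxElement()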
|
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import keras2onnx
import keras_contrib
import numpy as np
from keras2onnx import set_converter
from keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
from test_utils import run_onnx_runtime, convert_InstanceNormalizationLayer
Activation = keras.layers.Activation
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/pixelda/pixelda.py
class PixelDA():
def __init__(self):
# Input shape
self.img_rows = 32
self.img_cols = 32
self.channels = 3
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.num_classes = 10
# Loss weights
lambda_adv = 10
lambda_clf = 1
# Calculate output shape of D (PatchGAN)
patch = int(self.img_rows / 2**4)
self.disc_patch = (patch, patch, 1)
# Number of residual blocks in the generator
self.residual_blocks = 6
# Number of filters in first layer of discriminator and classifier
self.df = 64
self.cf = 64
# Build and compile the discriminators
self.discriminator = self.build_discriminator()
# Build the generator
self.generator = self.build_generator()
# Build the task (classification) network
self.clf = self.build_classifier()
# Input images from both domains
img_A = Input(shape=self.img_shape)
img_B = Input(shape=self.img_shape)
# Translate images from domain A to domain B
fake_B = self.generator(img_A)
# Classify the translated image
class_pred = self.clf(fake_B)
# For the combined model we will only train the generator and classifier
self.discriminator.trainable = False
# Discriminator determines validity of translated images
valid = self.discriminator(fake_B)
self.combined = Model(img_A, [valid, class_pred])
def build_generator(self):
"""Resnet Generator"""
def residual_block(layer_input):
"""Residual block described in paper"""
d = Conv2D(64, kernel_size=3, strides=1, padding='same')(layer_input)
d = BatchNormalization(momentum=0.8)(d)
d = Activation('relu')(d)
d = Conv2D(64, kernel_size=3, strides=1, padding='same')(d)
d = BatchNormalization(momentum=0.8)(d)
d = Add()([d, layer_input])
return d
# Image input
img = Input(shape=self.img_shape)
l1 = Conv2D(64, kernel_size=3, padding='same', activation='relu')(img)
        # Propagate signal through residual blocks
r = residual_block(l1)
for _ in range(self.residual_blocks - 1):
r = residual_block(r)
output_img = Conv2D(self.channels, kernel_size=3, padding='same', activation='tanh')(r)
return Model(img, output_img)
def build_discriminator(self):
def d_layer(layer_input, filters, f_size=4, normalization=True):
"""Discriminator layer"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if normalization:
d = InstanceNormalization()(d)
return d
img = Input(shape=self.img_shape)
d1 = d_layer(img, self.df, normalization=False)
d2 = d_layer(d1, self.df*2)
d3 = d_layer(d2, self.df*4)
d4 = d_layer(d3, self.df*8)
validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
return Model(img, validity)
def build_classifier(self):
def clf_layer(layer_input, filters, f_size=4, normalization=True):
"""Classifier layer"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if normalization:
d = InstanceNormalization()(d)
return d
img = Input(shape=self.img_shape)
c1 = clf_layer(img, self.cf, normalization=False)
c2 = clf_layer(c1, self.cf*2)
c3 = clf_layer(c2, self.cf*4)
c4 = clf_layer(c3, self.cf*8)
c5 = clf_layer(c4, self.cf*8)
class_pred = Dense(self.num_classes, activation='softmax')(Flatten()(c5))
return Model(img, class_pred)
set_converter(keras_contrib.layers.InstanceNormalization, convert_InstanceNormalizationLayer)
class TestPixelDA(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def test_PixelDA(self):
keras_model = PixelDA().combined
x = np.random.rand(5, 32, 32, 3).astype(np.float32)
expected = keras_model.predict([x])
onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files, atol=1.e-5))
if __name__ == "__main__":
unittest.main()
|
import audio_dspy as adsp
_fs_ = 44100
_num_ = 1000
_dur_ = 1
class SweepsTimeSuite:
"""
    Benchmarking Suite for sweep functions
"""
def setup(self):
self.sweep = adsp.sweep_log(1, _fs_/2, _dur_, _fs_)
self.sweep2 = adsp.sweep_log(1, _fs_/2, _dur_, _fs_)
def time_log_sweep(self):
for _ in range(_num_):
adsp.sweep_log(1, _fs_/2, _dur_, _fs_)
def time_lin_sweep(self):
for _ in range(_num_):
adsp.sweep_lin(_dur_, _fs_)
def time_sweep2ir(self):
for _ in range(_num_):
adsp.sweep2ir(self.sweep, self.sweep2)
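# Note: the setup()/time_* layout above follows the airspeed velocity (asv)
# benchmark convention, so the suite can typically be collected with `asv run`
# (an assumption about how the surrounding project is configured).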
|
import torch
import torch.nn as nn
from se3cnn.point.radial import CosineBasisModel
from e3_layer.persistent_point.data_hub import DataHub
from e3_layer.persistent_point.periodic_convolution import PeriodicConvolutionWithKernel
from e3_layer.persistent_point.gate import Gate
class EQLayer(nn.Module):
def __init__(self, data_hub, number_of_the_layer, radial_basis_function_kwargs, gate_kwargs, radial_basis_function=CosineBasisModel, convolution=PeriodicConvolutionWithKernel, gate=Gate, device=torch.device(type='cuda', index=0)):
super().__init__()
self.data_hub = data_hub
self.n = number_of_the_layer
self.radial_basis_function_kwargs = radial_basis_function_kwargs # or {'max_radius': 5.0, 'number_of_basis': 100, 'h': 100, 'L': 2, 'act': relu}
self.gate_kwargs = gate_kwargs # or {'scalar_activation': relu, 'gate_activation': sigmoid}
self.radial_basis_trainable_function = radial_basis_function(out_dim=self.data_hub.R_base_offsets[self.n][-1].item(), **radial_basis_function_kwargs).to(device)
self.convolution = convolution(self.data_hub, self.n)
if self.data_hub.has_gates[self.n]:
self.gate = gate(self.data_hub, self.n, **gate_kwargs)
def forward(self, features):
rbf_coefficients = self.radial_basis_trainable_function(self.data_hub.radii)
return self.gate(self.convolution(features, rbf_coefficients)) if hasattr(self, 'gate') else self.convolution(features, rbf_coefficients)
class EQNetwork(nn.Module):
def __init__(self, representations, radial_basis_functions_kwargs, gate_kwargs, radial_basis_function=CosineBasisModel, convolution=PeriodicConvolutionWithKernel, gate=Gate, has_gates=True, normalization='norm', device=torch.device(type='cuda', index=0)):
super().__init__()
number_of_layers = len(representations) - 1
# region input check
assert isinstance(has_gates, bool) or \
(isinstance(has_gates, (list, tuple)) and (len(has_gates) == 1 or len(has_gates) == number_of_layers) and all(isinstance(has_gate, bool) for has_gate in has_gates)), \
"has_gates should be specified as a single boolean value or as list/tuple of boolean values that matches number of layers"
assert isinstance(radial_basis_functions_kwargs, dict) or \
(isinstance(radial_basis_functions_kwargs, (list, tuple)) and (len(radial_basis_functions_kwargs) == 1 or len(radial_basis_functions_kwargs) == number_of_layers) and all(isinstance(rbf_args, dict) for rbf_args in radial_basis_functions_kwargs)), \
"radial_basis_functions_kwargs should be specified as a single dict (shared for all layers) or as list/tuple of dicts - one for each layers"
assert isinstance(gate_kwargs, dict) or \
(isinstance(gate_kwargs, (list, tuple)) and (len(gate_kwargs) == 1 or len(gate_kwargs) == number_of_layers) and all(isinstance(g_args, dict) for g_args in gate_kwargs)), \
"gate_kwargs should be specified as a single dict (shared for all layers) or as list/tuple of dicts - one for each layers"
# endregion
has_gates_list = [has_gates] if isinstance(has_gates, bool) else has_gates
has_gates_list = has_gates_list * number_of_layers if len(has_gates_list) == 1 else has_gates_list
# construct representations, without gates - gates got added in Data Hub where necessary
# can have mixed specifications (short - multiplicity, long - multiplicity and rotation order) across layers, but within layer it should be consistent
representations = [[(mul, l) if isinstance(mul, int) else mul for l, mul in enumerate(rs)] for rs in representations]
self.data_hub = DataHub(representations, has_gates_list, normalization, device)
radial_basis_functions_kwargs_list = [radial_basis_functions_kwargs] if isinstance(radial_basis_functions_kwargs, dict) else radial_basis_functions_kwargs
radial_basis_functions_kwargs_list = radial_basis_functions_kwargs_list * number_of_layers if len(radial_basis_functions_kwargs_list) == 1 else radial_basis_functions_kwargs_list
gate_kwargs_list = [gate_kwargs] if isinstance(gate_kwargs, dict) else gate_kwargs
gate_kwargs_list = gate_kwargs_list * number_of_layers if len(gate_kwargs_list) == 1 else gate_kwargs_list
layers = []
for i in range(number_of_layers):
layers.append(EQLayer(self.data_hub, i, radial_basis_functions_kwargs_list[i], gate_kwargs_list[i], radial_basis_function, convolution, gate, device))
self.layers = nn.Sequential(*layers)
def forward(self, features, radii_vectors, n_norm, ab_p_to_a, ab_p_to_b):
self.data_hub(radii_vectors, n_norm, ab_p_to_a, ab_p_to_b)
return self.layers(features).mean(dim=0)
|
""" HealthDES - A python library to support discrete event simulation in health and social care """
import simpy
from .Routing import Activity_ID
class DecisionBase:
# TODO: Need to return a function which includes list of next activities
def set_next_activity(self, activity):
return self.get_next_activity
def get_next_activity(self, person, activity_a):
""" Dummy method to anchor class """
return Activity_ID(None, None, None)
class NextActivity(DecisionBase):
    def set_next_activity(self, next_activity_id):
pass
def get_next_activity(self, person, activity_a):
# TODO: Need to sort routing access
activity = person.routing.get_activity(activity_a.routing_id)
# Add this instance to the arguments list
activity.kwargs['person'] = self
# Create two communication pipes for bi-directional communication with activity
activity.kwargs['message_to_activity'] = simpy.Store(self.env)
activity.kwargs['message_to_person'] = simpy.Store(self.env)
return activity
|
import json
import time
import graphics
DEBUG = True
class GShape:
"""
A parent class that represents a shape. It is named GShape for "Graphical shape"
since "shape" is such a generic word.
"""
def __init__(self, win: object, name: str, channel: int, x: int, y: int, color: str):
"""
A generic shape object.
:param win: The graphics window.
:param name: A string name of the thing this shape represents.
:param channel: What channel on the Christmas lights controller this is associated with.
:param x: The x coordinate of the top left of box containing the shape.
:param y: The y coordinate of the top left of box containing the shape.
:param color: The X11 name of the color as a string of the shape when it is on.
"""
self._win = win
self._name = name
self._channel = channel
self._x = x
self._y = y
self._color = color
self._graphics_object = None
self._current_color = ChannelCollection.GRAY
def draw(self):
"""
Draws the shape in the graphics window.
:return: None
"""
self._graphics_object.setWidth(1)
self._graphics_object.setOutline('black')
self._graphics_object.setFill(self._current_color)
self._graphics_object.draw(self._win)
def on(self):
"""
Turns on this shape. Turning on means changing the color from gray to the
color this object was initialized to be.
:return: None
"""
self._graphics_object.undraw()
self._current_color = self._color
self.draw()
def off(self):
"""
Turns off this shape. Turning off means changing the color to gray from the
color this object was initialized to be.
:return: None
"""
self._graphics_object.undraw()
# We do not turn off channel 0 (background stuff, etc.)
if self._channel != 0:
self._current_color = ChannelCollection.GRAY
self.draw()
class Triangle(GShape):
"""
A shape that is an equilateral triangle that points up.
"""
def __init__(self, win: object, name: str, channel: int, x: int, y: int, color: str, height: int, width: int):
"""
Initializes this Triangle GShape object.
:param win: The graphics window.
:param name: A string name of the thing this shape represents.
:param channel: What channel on the Christmas lights controller this is associated with.
:param x: The x coordinate of the top left of box containing the shape.
:param y: The y coordinate of the top left of box containing the shape.
:param color: The X11 name of the color as a string of the shape when it is on.
        :param height: The height of this Triangle (from the base to the vertex between the
            two equal-length sides at the top).
:param width: The width of the base of this Triangle.
"""
super().__init__(win, name, channel, x, y, color)
self._height = height
self._width = width
bottom_left = graphics.Point(self._x, self._y + self._height)
top_center = graphics.Point(self._x + self._width/2, self._y)
bottom_right = graphics.Point(self._x + self._width, self._y + self._height)
self._graphics_object = graphics.Polygon(bottom_left, top_center, bottom_right)
if DEBUG: print('Triangle created (%s)' % (name))
class Circle(GShape):
"""
A shape that is a circle.
"""
def __init__(self, win: object, name: str, channel: int, x: int, y: int, color: str, radius: int):
"""
Initializes this Circle GShape object.
:param win: The graphics window.
:param name: A string name of the thing this shape represents.
:param channel: What channel on the Christmas lights controller this is associated with.
:param x: The x coordinate of the top left of box containing the shape.
:param y: The y coordinate of the top left of box containing the shape.
:param color: The X11 name of the color as a string of the shape when it is on.
:param radius: The radius of this Circle.
"""
super().__init__(win, name, channel, x, y, color)
self._radius = radius
center_point = graphics.Point(self._x + self._radius/2, self._y + self._radius/2)
self._graphics_object = graphics.Circle(center_point, self._radius)
if DEBUG: print('Circle created (%s)' % (name))
class Rectangle(GShape):
"""
A shape that is a rectangle.
"""
def __init__(self, win: object, name: str, channel: int, x: int, y: int, color: str, height: int, width: int):
"""
Initializes this Rectangle GShape object.
:param win: The graphics window.
:param name: A string name of the thing this shape represents.
:param channel: What channel on the Christmas lights controller this is associated with.
:param x: The x coordinate of the top left of box containing the shape.
:param y: The y coordinate of the top left of box containing the shape.
:param color: The X11 name of the color as a string of the shape when it is on.
:param height: The height of this Rectangle.
:param width: The width of this Rectangle.
"""
super().__init__(win, name, channel, x, y, color)
self._height = height
self._width = width
        top_left = graphics.Point(self._x, self._y)
bottom_right = graphics.Point(self._x + self._width, self._y + self._height)
self._graphics_object = graphics.Rectangle(top_left, bottom_right)
if DEBUG: print('Rectangle created (%s)' % (name))
class Line(GShape):
"""
A shape that is a line segment.
"""
def __init__(self, win: object, name: str, channel: int, x: int, y: int, color: str, x2: int, y2: int):
"""
Initializes this Line GShape object.
:param win: The graphics window.
:param name: A string name of the thing this shape represents.
:param channel: What channel on the Christmas lights controller this is associated with.
:param x: The x coordinate of one end of the line.
:param y: The y coordinate of one end of the line.
:param color: The X11 name of the color as a string of the shape when it is on.
:param x2: The x coordinate of the other end of the line.
:param y2: The y coordinate of the other end of the line.
"""
super().__init__(win, name, channel, x, y, color)
self._x2 = x2
self._y2 = y2
point1 = graphics.Point(self._x, self._y)
point2 = graphics.Point(self._x2, self._y2)
self._graphics_object = graphics.Line(point1, point2)
if DEBUG: print('Line created (%s)' % (name))
class ChannelCollection:
"""
A collection of channel objects.
"""
# The color of a GShape that is turned off.
GRAY = "Gray41"
def __init__(self):
"""
Initializes the ChannelCollection with 17 channels. Channel 0 is for anything
that does not turn off and on. Channel 0 is always on. There can be multiple
GShape objects in each channel.
"""
self._channels = []
self._iter_index = 1
# We will have channels 0 - 16. 0 does not blink.
for i in range(0, 17):
self._channels.append([])
def add(self, shape_object: object):
"""
Adds a GShape object to the Channel collection into the channel associated with
the GShape object when the GShape was initialized.
:param shape_object: The GShape object to add.
:return: None
"""
self._channels[shape_object._channel].append(shape_object)
def on(self, channel_number: int):
"""
Turns on every GShape object in the requested channel number.
:param channel_number: The channel number to turn on.
:return: None
"""
for a_shape in self._channels[channel_number]:
a_shape.on()
def off(self, channel_number: int):
"""
Turns off every GShape object in the requested channel number.
:param channel_number: The channel number to turn off.
:return: None
"""
for a_shape in self._channels[channel_number]:
a_shape.off()
def __iter__(self):
"""
This makes this object iterable. This resets the object's iter "pointer"
every time a new iterator is created. This probably should never be used in
a nested for loop where both for loops are iterating over this object.
:return: A reference to this iterable object.
"""
self._iter_index = 1
return self
def __next__(self):
"""
Returns the next channel in this iterable ChannelCollection object.
:return: A reference to the list of GShape objects for the next channel.
(One channel in the ChannelCollection contains a list of GShape objects that should
all turn on or off when a channel is turned on or off.)
"""
        if self._iter_index >= len(self._channels):
            raise StopIteration
        channel_list = self._channels[self._iter_index]
        self._iter_index += 1
        return channel_list
class GraphicsJson:
"""
A class that creates a Graphical visualization based on the data in a JSON file.
"""
def __init__(self, filename: str, channel_collection: object):
"""
Reads in a JSON file and creates a graphical visualization from the data.
:param filename: The filename of the JSON file to read in.
:param channel_collection: The ChannelCollection object to populate.
"""
json_file = open(filename, 'r')
self._json_data = json.load(json_file)
json_file.close()
self._width = self._json_data["window_width"]
self._height = self._json_data["window_height"]
self._win = graphics.GraphWin("Map", self._width, self._height)
self._win.setBackground(self._json_data['bg_color'])
self._channel_collection = channel_collection
if DEBUG: print(f'Using Map: { self._json_data["name"] }')
channels = self._json_data['channels']
for one_channel in channels:
# Create a GShape based on the dictionary for this one "channel".
shape = self.shape_factory(one_channel)
self._channel_collection.add(shape)
shape.draw()
self._channel_collection.on(0)
def __del__(self):
"""
Destructor for this object that closes the graphics environment.
:return: None
"""
self._win.close()
def close(self):
"""
Closes the graphics environment.
:return: None
"""
self._win.close()
def shape_factory(self, channel_entry: dict):
"""
Takes in a dictionary member from the "channels" array in the JSON file and
creates a GShape object.
:param channel_entry: A dictionary member from the "channels" array in the JSON file.
:return: A GShape object based on the input JSON data.
"""
name = channel_entry['name']
channel = channel_entry['channel']
if channel > 16:
channel = 0
elif channel < 0:
channel = 0
shape = channel_entry['shape']
x = channel_entry['x']
y = channel_entry['y']
color = channel_entry['color']
if shape.lower() == 'triangle':
height = channel_entry['height']
width = channel_entry['width']
return Triangle(self._win, name, channel, x, y, color, height, width)
elif shape.lower() == 'circle':
radius = channel_entry['radius']
return Circle(self._win, name, channel, x, y, color, radius)
elif shape.lower() == 'rectangle':
height = channel_entry['height']
width = channel_entry['width']
return Rectangle(self._win, name, channel, x, y, color, height, width)
elif shape.lower() == 'line':
x2 = channel_entry['x2']
y2 = channel_entry['y2']
return Line(self._win, name, channel, x, y, color, x2, y2)
def all_on(self):
"""
Turns on all of the channels in the ChannelCollection.
:return: None
"""
for i in range(1, 17):
self._channel_collection.on(i)
def all_off(self):
"""
Turns off all of the channels in the ChannelCollection.
:return: None
"""
for i in range(1, 17):
self._channel_collection.off(i)
if __name__ == '__main__':
# Tests GraphicsJson without using any of the rest of the ChristmasLights code.
channel_collection = ChannelCollection()
graphics_json = GraphicsJson('MapData.json', channel_collection)
for i in range(1, 17):
for j in range(1, 17):
if i == j:
channel_collection.on(j)
else:
channel_collection.off(j)
time.sleep(.5)
graphics_json.all_off()
graphics_json._win.getMouse()
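# Illustrative MapData.json structure inferred from GraphicsJson and
# shape_factory above (the concrete names and numbers are made up):
# {
#   "name": "Example map", "window_width": 400, "window_height": 300,
#   "bg_color": "black",
#   "channels": [
#     {"name": "tree", "channel": 1, "shape": "triangle", "x": 10, "y": 20,
#      "color": "green", "height": 50, "width": 40},
#     {"name": "bulb", "channel": 2, "shape": "circle", "x": 100, "y": 50,
#      "color": "red", "radius": 10}
#   ]
# }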
|
# -*- coding: utf-8 -*-
"""PSF DECONVOLUTION MODULE
This module deconvolves a set of galaxy images with a known object-variant PSF.
:Author: Samuel Farrens <samuel.farrens@gmail.com>
"""
from __future__ import print_function
from builtins import range, zip
from scipy.linalg import norm
from modopt.math.stats import sigma_mad
from modopt.opt.algorithms import *
from modopt.opt.cost import costObj
from modopt.opt.linear import *
from modopt.opt.proximity import *
from modopt.opt.reweight import cwbReweight
from modopt.signal.wavelet import *
from . gradient import *
def set_noise(data, **kwargs):
"""Set the noise level
This method calculates the noise standard deviation using the median
absolute deviation (MAD) of the input data and adds it to the keyword
arguments.
Parameters
----------
data : np.ndarray
Input noisy data (3D array)
Returns
-------
dict Updated keyword arguments
"""
    # If the noise is not already provided, calculate it using the MAD
if isinstance(kwargs['noise_est'], type(None)):
kwargs['noise_est'] = sigma_mad(data)
print(' - Noise Estimate:', kwargs['noise_est'])
if 'log' in kwargs:
kwargs['log'].info(' - Noise Estimate: ' + str(kwargs['noise_est']))
return kwargs
def set_grad_op(data, psf, **kwargs):
"""Set the gradient operator
This method defines the gradient operator class to use and add an instance
to the keyword arguments.
Parameters
----------
data : np.ndarray
Input noisy data (3D array)
psf : np.ndarray
PSF data (2D or 3D array)
Returns
-------
dict Updated keyword arguments
"""
# Set the gradient operator
if kwargs['grad_type'] == 'psf_known':
kwargs['grad_op'] = (GradKnownPSF(data, psf,
psf_type=kwargs['psf_type'],
convolve_method=kwargs['convolve_method']))
elif kwargs['grad_type'] == 'psf_unknown':
kwargs['grad_op'] = (GradUnknownPSF(data, psf,
prox=Positivity(),
psf_type=kwargs['psf_type'],
convolve_method=kwargs['convolve_method'],
beta_reg=kwargs['beta_psf'],
lambda_reg=kwargs['lambda_psf']))
elif kwargs['grad_type'] == 'shape':
kwargs['grad_op'] = (GradShape(data, psf, psf_type=kwargs['psf_type'],
convolve_method=kwargs['convolve_method'],
lambda_reg=kwargs['lambda_shape']))
elif kwargs['grad_type'] == 'none':
kwargs['grad_op'] = GradNone(data, psf, psf_type=kwargs['psf_type'],
convolve_method=kwargs['convolve_method'])
print(' - Spectral Radius:', kwargs['grad_op'].spec_rad)
if 'log' in kwargs:
kwargs['log'].info(' - Spectral Radius: ' +
str(kwargs['grad_op'].spec_rad))
return kwargs
def set_linear_op(data, **kwargs):
    """Set the linear operator
    This method defines the linear operator class to use and adds an instance
    to the keyword arguments. It additionally adds the l1 norm of the linear
    operator and the wavelet filters (if used) to the kwargs.
Parameters
----------
data : np.ndarray
Input noisy data (3D array)
Returns
-------
dict Updated keyword arguments
ToDo
----
- Clean up wavelet_filters and l1norm
"""
# Set the options for mr_transform (for sparsity)
if kwargs['mode'] in ('all', 'sparse'):
wavelet_opt = ['-t ' + kwargs['wavelet_type']]
kwargs['wavelet_filters'] = get_mr_filters(data.shape[-2:],
wavelet_opt)
kwargs['linear_l1norm'] = (data.shape[0] * np.sqrt(sum((np.sum(np.abs
(filter_i)) ** 2 for filter_i in
kwargs['wavelet_filters']))))
# Set the linear operator
if kwargs['mode'] == 'all':
kwargs['linear_op'] = LinearCombo([WaveletConvolve(
kwargs['wavelet_filters'],
method=kwargs['convolve_method'],),
Identity()])
elif kwargs['mode'] in ('lowr', 'grad'):
kwargs['linear_op'] = Identity()
kwargs['linear_l1norm'] = 1.0
elif kwargs['mode'] == 'sparse':
kwargs['linear_op'] = WaveletConvolve(kwargs['wavelet_filters'],
method=kwargs['convolve_method'],
)
return kwargs
def set_sparse_weights(data_shape, psf, **kwargs):
"""Set the sparsity weights
This method defines the weights for thresholding in the sparse domain and
    adds them to the keyword arguments. It additionally defines the shape of the
dual variable.
Parameters
----------
data_shape : tuple
Shape of the input data array
psf : np.ndarray
PSF data (2D or 3D array)
Returns
-------
dict Updated keyword arguments
"""
# Convolve the PSF with the wavelet filters
if kwargs['psf_type'] == 'fixed':
filter_conv = (filter_convolve(np.rot90(psf, 2),
kwargs['wavelet_filters'],
method=kwargs['convolve_method']))
filter_norm = np.array([norm(a) * b * np.ones(data_shape[1:])
for a, b in zip(filter_conv,
kwargs['wave_thresh_factor'])])
filter_norm = np.array([filter_norm for i in
range(data_shape[0])])
else:
filter_conv = (filter_convolve_stack(np.rot90(psf, 2),
kwargs['wavelet_filters'],
method=kwargs['convolve_method']))
filter_norm = np.array([[norm(b) * c * np.ones(data_shape[1:])
for b, c in zip(a,
kwargs['wave_thresh_factor'])]
for a in filter_conv])
# Define a reweighting instance
kwargs['reweight'] = cwbReweight(kwargs['noise_est'] * filter_norm)
# Set the shape of the dual variable
dual_shape = ([kwargs['wavelet_filters'].shape[0]] + list(data_shape))
dual_shape[0], dual_shape[1] = dual_shape[1], dual_shape[0]
kwargs['dual_shape'] = dual_shape
return kwargs
def set_condat_param(**kwargs):
"""Set the Condat-Vu parameters
This method sets the values of tau and sigma in the Condat-Vu proximal-dual
splitting algorithm if not already provided. It additionally checks that
the combination of values will lead to convergence.
Returns
-------
dict Updated keyword arguments
"""
# Define a method for calculating sigma and/or tau
def get_sig_tau():
return 1.0 / (kwargs['grad_op'].spec_rad + kwargs['linear_l1norm'])
    # Calculate tau if not provided
if isinstance(kwargs['condat_tau'], type(None)):
kwargs['condat_tau'] = get_sig_tau()
# Calculate sigma if not provided
if isinstance(kwargs['condat_sigma'], type(None)):
kwargs['condat_sigma'] = get_sig_tau()
print(' - tau:', kwargs['condat_tau'])
print(' - sigma:', kwargs['condat_sigma'])
print(' - rho:', kwargs['relax'])
if 'log' in kwargs:
kwargs['log'].info(' - tau: ' + str(kwargs['condat_tau']))
kwargs['log'].info(' - sigma: ' + str(kwargs['condat_sigma']))
kwargs['log'].info(' - rho: ' + str(kwargs['relax']))
# Test combination of sigma and tau
sig_tau_test = (1.0 / kwargs['condat_tau'] - kwargs['condat_sigma'] *
kwargs['linear_l1norm'] ** 2 >=
kwargs['grad_op'].spec_rad / 2.0)
print(' - 1/tau - sigma||L||^2 >= beta/2:', sig_tau_test)
if 'log' in kwargs:
kwargs['log'].info(' - 1/tau - sigma||L||^2 >= beta/2: ' +
str(sig_tau_test))
return kwargs
def get_lambda(n_images, p_pixel, sigma, spec_rad):
"""Get lambda value
This method calculates the singular value threshold for low-rank
regularisation
Parameters
----------
n_images : int
Total number of images
p_pixel : int
Total number of pixels
sigma : float
Noise standard deviation
spec_rad : float
The spectral radius of the gradient operator
Returns
-------
float Lambda value
"""
return sigma * np.sqrt(np.max([n_images + 1, p_pixel])) * spec_rad
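# Illustrative value of the threshold above (the numbers are made up):
# get_lambda(n_images=100, p_pixel=41 * 41, sigma=0.01, spec_rad=1.0)
# = 0.01 * sqrt(max(101, 1681)) * 1.0 = 0.01 * 41 ≈ 0.41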
def set_lowr_thresh(data_shape, **kwargs):
"""Set the low-rank threshold
This method sets the value of the low-rank singular value threshold.
Parameters
----------
data_shape : tuple
Shape of the input data array
Returns
-------
dict Updated keyword arguments
"""
if kwargs['lowr_type'] == 'standard':
kwargs['lambda'] = (kwargs['lowr_thresh_factor'] *
get_lambda(data_shape[0], np.prod(data_shape[1:]),
kwargs['noise_est'], kwargs['grad_op'].spec_rad))
elif kwargs['lowr_type'] == 'ngole':
kwargs['lambda'] = (kwargs['lowr_thresh_factor'] * kwargs['noise_est'])
print(' - lambda:', kwargs['lambda'])
if 'log' in kwargs:
kwargs['log'].info(' - lambda: ' + str(kwargs['lambda']))
return kwargs
def set_primal_dual(data_shape, **kwargs):
"""Set primal and dual variables
This method sets the initial values of the primal and dual variables
Parameters
----------
data_shape : tuple
Shape of the input data array
Returns
-------
dict Updated keyword arguments
"""
# Set the initial values of the primal variable if not provided
if isinstance(kwargs['primal'], type(None)):
kwargs['primal'] = np.ones(data_shape)
# Set the initial values of the dual variable
if kwargs['mode'] == 'all':
kwargs['dual'] = np.empty(2, dtype=np.ndarray)
kwargs['dual'][0] = np.ones(kwargs['dual_shape'])
kwargs['dual'][1] = np.ones(data_shape)
elif kwargs['mode'] in ('lowr', 'grad'):
kwargs['dual'] = np.ones(data_shape)
elif kwargs['mode'] == 'sparse':
kwargs['dual'] = np.ones(kwargs['dual_shape'])
print(' - Primal Variable Shape:', kwargs['primal'].shape)
print(' - Dual Variable Shape:', kwargs['dual'].shape)
print(' ' + '-' * 70)
if 'log' in kwargs:
kwargs['log'].info(' - Primal Variable Shape: ' +
str(kwargs['primal'].shape))
kwargs['log'].info(' - Dual Variable Shape: ' +
str(kwargs['dual'].shape))
return kwargs
def set_prox_op_and_cost(data, **kwargs):
"""Set the proximity operators and cost function
This method sets the proximity operators and cost function instances.
Parameters
----------
data : np.ndarray
Input noisy data (3D array)
Returns
-------
dict Updated keyword arguments
"""
# Create a list of proximity operators
kwargs['prox_op'] = []
    # Set the first operator as a positivity constraint or simply the identity
if not kwargs['no_pos']:
kwargs['prox_op'].append(Positivity())
else:
kwargs['prox_op'].append(IdentityProx())
# Add a second proximity operator
if kwargs['mode'] == 'all':
kwargs['prox_op'].append(ProximityCombo(
[SparseThreshold(
kwargs['linear_op'].operators[0],
kwargs['reweight'].weights,),
LowRankMatrix(kwargs['lambda'],
thresh_type=kwargs['lowr_thresh_type'],
lowr_type=kwargs['lowr_type'],
operator=kwargs['grad_op'].trans_op)]))
elif kwargs['mode'] == 'lowr':
kwargs['prox_op'].append(LowRankMatrix(kwargs['lambda'],
thresh_type=kwargs['lowr_thresh_type'],
lowr_type=kwargs['lowr_type'],
operator=kwargs['grad_op'].trans_op))
operator_list = [kwargs['grad_op']] + kwargs['prox_op']
elif kwargs['mode'] == 'sparse':
kwargs['prox_op'].append(SparseThreshold(kwargs['linear_op'],
kwargs['reweight'].weights))
elif kwargs['mode'] == 'grad':
kwargs['prox_op'].append(IdentityProx())
# Set the cost function
kwargs['cost_op'] = (costObj([kwargs['grad_op']] + kwargs['prox_op'],
tolerance=kwargs['convergence'],
cost_interval=kwargs['cost_window'],
plot_output=kwargs['output'],
verbose=not kwargs['quiet']))
return kwargs
def set_optimisation(**kwargs):
"""Set the optimisation technique
This method sets the technique used for optimising the problem
Returns
-------
dict Updated keyword arguments
"""
# Initialise an optimisation instance
if kwargs['opt_type'] == 'fwbw':
kwargs['optimisation'] = (ForwardBackward(kwargs['primal'],
kwargs['grad_op'], kwargs['prox_op'][1],
kwargs['cost_op'], auto_iterate=False))
elif kwargs['opt_type'] == 'condat':
kwargs['optimisation'] = (Condat(kwargs['primal'], kwargs['dual'],
kwargs['grad_op'], kwargs['prox_op'][0],
kwargs['prox_op'][1], kwargs['linear_op'],
kwargs['cost_op'], rho=kwargs['relax'],
sigma=kwargs['condat_sigma'],
tau=kwargs['condat_tau'],
auto_iterate=False))
elif kwargs['opt_type'] == 'gfwbw':
kwargs['optimisation'] = (GenForwardBackward(kwargs['primal'],
kwargs['grad_op'], kwargs['prox_op'],
lambda_init=1.0, cost=kwargs['cost_op'],
weights=[0.1, 0.9],
auto_iterate=False))
return kwargs
def perform_reweighting(**kwargs):
"""Perform reweighting
This method updates the weights used for thresholding in the sparse domain
Returns
-------
dict Updated keyword arguments
"""
# Loop through number of reweightings
for i in range(kwargs['n_reweights']):
print(' - REWEIGHT:', i + 1)
print('')
# Generate the new weights following the reweighting prescription
kwargs['reweight'].reweight(kwargs['linear_op'].op(
kwargs['optimisation'].x_final))
# Perform optimisation with new weights
kwargs['optimisation'].iterate(max_iter=kwargs['n_iter'])
print('')
def run(data, psf, **kwargs):
"""Run deconvolution
This method initialises the operator classes and runs the optimisation
algorithm
Parameters
----------
data : np.ndarray
Input data array, an array of 2D images
psf : np.ndarray
Input PSF array, a single 2D PSF or an array of 2D PSFs
Returns
-------
np.ndarray Deconvolved data
"""
# SET THE NOISE ESTIMATE
kwargs = set_noise(data, **kwargs)
# SET THE GRADIENT OPERATOR
kwargs = set_grad_op(data, psf, **kwargs)
# SET THE LINEAR OPERATOR
kwargs = set_linear_op(data, **kwargs)
# SET THE WEIGHTS IN THE SPARSE DOMAIN
if kwargs['mode'] in ('all', 'sparse'):
kwargs = set_sparse_weights(data.shape, psf, **kwargs)
# SET THE CONDAT-VU PARAMETERS
if kwargs['opt_type'] == 'condat':
kwargs = set_condat_param(**kwargs)
# SET THE LOW-RANK THRESHOLD
if kwargs['mode'] in ('all', 'lowr'):
kwargs = set_lowr_thresh(data.shape, **kwargs)
# SET THE INITIAL PRIMAL AND DUAL VARIABLES
kwargs = set_primal_dual(data.shape, **kwargs)
# SET THE PROXIMITY OPERATORS AND THE COST FUNCTION
kwargs = set_prox_op_and_cost(data, **kwargs)
# SET THE OPTIMISATION METHOD
kwargs = set_optimisation(**kwargs)
# PERFORM OPTIMISATION
kwargs['optimisation'].iterate(max_iter=kwargs['n_iter'])
# PERFORM REWEIGHTING FOR SPARSITY
if kwargs['mode'] in ('all', 'sparse'):
perform_reweighting(**kwargs)
# PLOT THE COST FUNCTION
if not kwargs['no_plots']:
kwargs['cost_op'].plot_cost()
# FINISH AND RETURN RESULTS
if 'log' in kwargs:
kwargs['log'].info(' - Final iteration number: ' +
str(kwargs['cost_op']._iteration))
kwargs['log'].info(' - Final log10 cost value: ' +
str(np.log10(kwargs['cost_op'].cost)))
kwargs['log'].info(' - Converged: ' +
str(kwargs['optimisation'].converge))
primal_res = kwargs['optimisation'].x_final
if kwargs['opt_type'] == 'condat':
dual_res = kwargs['optimisation'].y_final
else:
dual_res = None
if kwargs['grad_type'] == 'psf_unknown':
psf_res = kwargs['grad_op']._psf
else:
psf_res = None
return primal_res, dual_res, psf_res
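# Hedged usage sketch (added for illustration; not part of the original module).
# The keyword names follow the ones used above, but the values are assumptions and
# the remaining configuration keys expected by the set_* helpers are elided:
# primal, dual, psf_est = run(noisy_data, psf_stack, mode='all', opt_type='condat',
#                             n_iter=150, n_reweights=1, grad_type='psf_known',
#                             quiet=False, no_plots=True)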
|
import os
import sys
import configparser
import matplotlib
matplotlib.use('Agg') # Don't try to use X forwarding for plots
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import pandas as pd
import numpy as np
from root_pandas import read_root
### Insert path to most recent directory with fast data
_,mostRecentDir = sys.argv
config = configparser.ConfigParser()
config.read('config.ini')
plotoutDir = config['Control']['plotoutdir']
rootf=[]
for file in os.listdir(mostRecentDir):
if file.endswith('.root'):
rootf.append(file)
def append_dfs():
if len(rootf)>1:
framesroot = [read_root(mostRecentDir+'/'+rf, columns=['channel','time','error','integral']) for rf in rootf]
df = pd.concat(framesroot,axis=0)
else:
df = read_root(mostRecentDir+'/'+rootf[0], columns=['channel','time','error','integral'])
return df
def get_mc_arr(channel):
if channel == 0 or channel == 1:
return pd.read_csv(os.getcwd()+'/monte_carlo/cs137.csv', header = None, names = ['energy','Count']),(580,750)
elif channel == 2 or channel == 3:
return pd.read_csv(os.getcwd()+'/monte_carlo/co60.csv', header = None, names = ['energy','Count']),(1050,1450)
elif channel == 6 or channel == 7:
return pd.read_csv(os.getcwd()+'/monte_carlo/ti44.csv', header = None, names = ['energy','Count']),(430,1720)
def timedelta(df):
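    # subtracting 2208988800 s shifts the timestamps from a 1900-based epoch
    # (as used by NTP) to the Unix epoch before conversion to datetime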
df['time'] = df['time'] - 2208988800
df['time'] = pd.to_datetime(df['time'], unit = 's')
return (df['time'].max() - df['time'].min()).seconds
def specdf(df,channel):
df = df[(df['channel']==channel)][['time','integral']]
td = timedelta(df)
df = df.groupby(pd.cut(df['integral'],bins=500)).agg('count').rename(columns={'integral' : 'Count'}).reset_index()
df = df.rename(columns={'integral' : 'energy'})
df['energy'] = df['energy'].astype('str')
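    # parse the left bin edge out of each pd.cut interval string,
    # e.g. '(580.0, 582.5]' -> 580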
df['energy'] = df['energy'].str.split(',').str[0].str.split('.').str[0].str[1:].astype('int')
df = df[['energy','Count']]
df['Count'] = df['Count']/(td)
return df
def plot_log_spectra(df,channel):
df_spec = specdf(df[df.error==0],channel)
df_spec_e = specdf(df[df.error!=0],channel)
bins = str((df_spec['energy'].max()-df_spec['energy'].min())/500)
plt.figure()
ax = df_spec.plot(x='energy',y='Count',logy=True)
df_spec_e.plot(x='energy',y='Count',logy=True, ax = ax)
ax.legend(['Error free emissions','With error'])
plt.ylabel('Rate (Hz) / ' +bins+' KeV' )
plt.xlabel('Energy (KeV)')
plt.savefig(plotoutDir+'/log_spectrum_channel'+str(channel)+'.png')
plt.close()
return
def plot_spectra(df,channel):
if channel == 4 or channel == 5:
return
df_spec = specdf(df[df.error==0],channel)
df_mc,lims = get_mc_arr(channel)
bins = str((df_spec['energy'].max()-df_spec['energy'].min())/500)
df_spec = df_spec[(df_spec['energy'] >= lims[0]) & (df_spec['energy'] <= lims[1])]
df_mc = df_mc[(df_mc['energy'] >= lims[0]) & (df_mc['energy'] <= lims[1])]
df_mc['Count'] = (df_spec['Count'].max()/df_mc['Count'].max())*df_mc['Count']
plt.figure()
ax = df_spec.plot(x='energy',y='Count')
df_mc.plot(x='energy',y='Count', ax = ax)
ax.legend(['Experimental Peaks','Simulated Peaks'])
plt.ylabel('Rate (Hz) / ' +bins+' KeV' )
plt.xlabel('Energy (KeV)')
plt.savefig(plotoutDir+'/mc'+str(channel)+'.png')
plt.close()
return
def plot_ana_rates(channel):
#if chn == 4 or chn == 5:
#return
plt.figure()
df_ana[df_ana.channel==channel].plot(x='time',y='rate',kind='scatter')
plt.savefig(plotoutDir+'/rate_ana_channel'+str(channel)+'.png')
plt.close()
return
### Fetches processed and analyzed dataframes from root sources
dataframe = append_dfs()
df_ana = read_root(mostRecentDir.split('mx_b')[0] + "analysis/ANA_" + mostRecentDir.split('/')[-2] + '.root', columns = ['rate','drate','time','channel','e'])
## Energy filter ( energy > 0 KeV )
dataframe = dataframe[dataframe.integral > 0]
for chn in range(0,8):
plot_spectra(dataframe,chn)
plot_log_spectra(dataframe,chn)
|
import os
import copy
import math
import numpy as np
import basis.robot_math as rm
import robot_sim._kinematics.jlchain as jl
import robot_sim.manipulators.manipulator_interface as mi
class IRB14050(mi.ManipulatorInterface):
def __init__(self, pos=np.zeros(3), rotmat=np.eye(3), homeconf=np.zeros(7), name='irb14050', enable_cc=True):
super().__init__(pos=pos, rotmat=rotmat, name=name)
this_dir, this_filename = os.path.split(__file__)
self.jlc = jl.JLChain(pos=pos, rotmat=rotmat, homeconf=homeconf, name=name)
# seven joints, n_jnts = 7+2 (tgt ranges from 1-7), nlinks = 7+1
jnt_safemargin = math.pi / 18.0
# self.jlc.jnts[1]['loc_pos'] = np.array([0.05355, -0.0725, 0.41492])
# self.jlc.jnts[1]['loc_rotmat'] = rm.rotmat_from_euler(-0.9795, -0.5682, -2.3155)
self.jlc.jnts[1]['loc_pos'] = np.array([0., 0., 0.])
self.jlc.jnts[1]['motion_rng'] = [-2.94087978961 + jnt_safemargin, 2.94087978961 - jnt_safemargin]
self.jlc.jnts[2]['loc_pos'] = np.array([0.03, 0.0, 0.1])
self.jlc.jnts[2]['loc_rotmat'] = rm.rotmat_from_euler(1.57079632679, 0.0, 0.0)
self.jlc.jnts[2]['motion_rng'] = [-2.50454747661 + jnt_safemargin, 0.759218224618 - jnt_safemargin]
self.jlc.jnts[3]['loc_pos'] = np.array([-0.03, 0.17283, 0.0])
self.jlc.jnts[3]['loc_rotmat'] = rm.rotmat_from_euler(-1.57079632679, 0.0, 0.0)
self.jlc.jnts[3]['motion_rng'] = [-2.94087978961 + jnt_safemargin, 2.94087978961 - jnt_safemargin]
self.jlc.jnts[4]['loc_pos'] = np.array([-0.04188, 0.0, 0.07873])
self.jlc.jnts[4]['loc_rotmat'] = rm.rotmat_from_euler(1.57079632679, -1.57079632679, 0.0)
self.jlc.jnts[4]['motion_rng'] = [-2.15548162621 + jnt_safemargin, 1.3962634016 - jnt_safemargin]
self.jlc.jnts[5]['loc_pos'] = np.array([0.0405, 0.16461, 0.0])
self.jlc.jnts[5]['loc_rotmat'] = rm.rotmat_from_euler(-1.57079632679, 0.0, 0.0)
self.jlc.jnts[5]['motion_rng'] = [-5.06145483078 + jnt_safemargin, 5.06145483078 - jnt_safemargin]
self.jlc.jnts[6]['loc_pos'] = np.array([-0.027, 0, 0.10039])
self.jlc.jnts[6]['loc_rotmat'] = rm.rotmat_from_euler(1.57079632679, 0.0, 0.0)
self.jlc.jnts[6]['motion_rng'] = [-1.53588974176 + jnt_safemargin, 2.40855436775 - jnt_safemargin]
self.jlc.jnts[7]['loc_pos'] = np.array([0.027, 0.029, 0.0])
self.jlc.jnts[7]['loc_rotmat'] = rm.rotmat_from_euler(-1.57079632679, 0.0, 0.0)
self.jlc.jnts[7]['motion_rng'] = [-3.99680398707 + jnt_safemargin, 3.99680398707 - jnt_safemargin]
# links
self.jlc.lnks[1]['name'] = "link_1"
self.jlc.lnks[1]['mesh_file'] = os.path.join(this_dir, "meshes", "link_1.stl")
self.jlc.lnks[1]['rgba'] = [.5, .5, .5, 1]
self.jlc.lnks[2]['name'] = "link_2"
self.jlc.lnks[2]['mesh_file'] = os.path.join(this_dir, "meshes", "link_2.stl")
self.jlc.lnks[2]['rgba'] = [.929, .584, .067, 1]
self.jlc.lnks[3]['name'] = "link_3"
self.jlc.lnks[3]['mesh_file'] = os.path.join(this_dir, "meshes", "link_3.stl")
self.jlc.lnks[3]['rgba'] = [.7, .7, .7, 1]
self.jlc.lnks[4]['name'] = "link_4"
self.jlc.lnks[4]['mesh_file'] = os.path.join(this_dir, "meshes", "link_4.stl")
self.jlc.lnks[4]['rgba'] = [0.180, .4, 0.298, 1]
self.jlc.lnks[5]['name'] = "link_5"
self.jlc.lnks[5]['mesh_file'] = os.path.join(this_dir, "meshes", "link_5.stl")
self.jlc.lnks[5]['rgba'] = [.7, .7, .7, 1]
self.jlc.lnks[6]['name'] = "link_6"
self.jlc.lnks[6]['mesh_file'] = os.path.join(this_dir, "meshes", "link_6.stl")
self.jlc.lnks[6]['rgba'] = [0.180, .4, 0.298, 1]
self.jlc.lnks[7]['name'] = "link_7"
# self.jlc.lnks[7]['mesh_file'] = os.path.join(this_dir, "meshes", "link_7.stl") # not really needed to visualize
# self.jlc.lnks[7]['rgba'] = [.5,.5,.5,1]
# reinitialization
self.jlc.reinitialize()
# collision detection
if enable_cc:
self.enable_cc()
def enable_cc(self):
super().enable_cc()
self.cc.add_cdlnks(self.jlc, [1, 2, 3, 4, 5, 6])
activelist = [self.jlc.lnks[1],
self.jlc.lnks[2],
self.jlc.lnks[3],
self.jlc.lnks[4],
self.jlc.lnks[5],
self.jlc.lnks[6]]
self.cc.set_active_cdlnks(activelist)
fromlist = [self.jlc.lnks[1]]
intolist = [self.jlc.lnks[5],
self.jlc.lnks[6]]
self.cc.set_cdpair(fromlist, intolist)
if __name__ == '__main__':
import copy
import time
import visualization.panda.world as wd
import modeling.geometric_model as gm
base = wd.World(cam_pos=[1, 0, 1], lookat_pos=[0, 0, 0])
gm.gen_frame().attach_to(base)
manipulator_instance = IRB14050(enable_cc=True)
manipulator_instance.fk(
jnt_values=[0, 0, manipulator_instance.jnts[3]['motion_rng'][1] / 2, manipulator_instance.jnts[4]['motion_rng'][1], 0, 0, 0])
manipulator_meshmodel = manipulator_instance.gen_meshmodel()
manipulator_meshmodel.attach_to(base)
manipulator_instance.gen_stickmodel().attach_to(base)
manipulator_instance.show_cdprimit()
tic = time.time()
print(manipulator_instance.is_collided())
toc = time.time()
print(toc - tic)
manipulator_instance2 = manipulator_instance.copy()
# manipulator_instance2.disable_localcc()
manipulator_instance2.fix_to(pos=np.array([.2, .2, 0.2]), rotmat=np.eye(3))
manipulator_instance2.fk(
jnt_values=[0, 0, manipulator_instance.jnts[3]['motion_rng'][1] / 2, manipulator_instance.jnts[4]['motion_rng'][1]*1.1,
manipulator_instance.jnts[5]['motion_rng'][1], 0, 0])
manipulator_meshmodel2 = manipulator_instance2.gen_meshmodel()
manipulator_meshmodel2.attach_to(base)
manipulator_instance2.show_cdprimit()
tic = time.time()
print(manipulator_instance2.is_collided())
toc = time.time()
print(toc - tic)
base.run()
|
# -*- coding: utf-8 -*-
from collections import namedtuple
from inspect import isclass
attr_type_to_column = {
int: "ival",
float: "fval",
basestring: "tval",
str: "tval",
# TODO: add other python subtype of basestring.
bool: "bval",
}
_op_val_typecheck = {
"==": [basestring, int, float, bool],
"=": [basestring, int, float, bool],
"!=": [basestring, int, float, bool],
"<=": [int, float],
">=": [int, float],
">": [int, float],
"<": [int, float],
}
def valid_op_val(op, val):
return any(map(lambda t: isinstance(val, t), _op_val_typecheck[op]))
class Attribute(namedtuple('Attribute', ['key', 'op', 'value'])):
def __new__(cls, key, op=None, value=None):
# xor to see if either op or value alone wasn't provided.
if bool(op is None) ^ bool(value is None):
raise ValueError("You need to supply both the operator and the "
+ "value as parameters or none of them.")
check_args(key, instance=basestring, exception=True)
if op and value and not valid_op_val(op, value):
raise ValueError("Incompatiblity between op {} and value {}.".
format(op, value))
inst = super(Attribute, cls).__new__(cls, key, op, value)
if value is not None:
column = attr_type_to_column[type(value)]
setattr(inst, 'column', column)
return inst
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def check_args(arguments, instance=[], subclass=[], exception=False):
if not isinstance(arguments, (list, tuple)):
arguments = [arguments]
if not isinstance(instance, list):
instance = [instance]
if not isinstance(subclass, list):
subclass = [subclass]
def checker(var):
instance_check = map(lambda instance: isinstance(var, instance),
instance)
subclass_check = []
if isclass(var):
subclass_check = map(lambda sub: issubclass(var, sub), subclass)
return any(instance_check + subclass_check)
results = map(checker, arguments)
correct = all(results)
if not correct and exception:
error_string = ", ".join(map(lambda e: e.__name__, instance + subclass))
raise TypeError("The arguments need to be of the following type: {}".
format(error_string))
return correct
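# Illustrative usage (added; the module targets Python 2, hence `basestring`):
# Attribute('size', '>', 10) stores the value in the 'ival' column,
# Attribute('name', '=', 'foo') maps to the 'tval' column, and
# Attribute('size', '>') raises ValueError because only the operator was given.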
|
is_valid = True
while is_valid:
password = int(input())
if password == 2002:
print("Acesso Permitido")
is_valid =False
else:
print("Senha Invalida")
|
import torch
from torch import nn
# import torch.nn.functional as F
from train.helpers import *
# inspired by fastai course
class BCE_Loss(nn.Module):
def __init__(self, n_classes, device):
super().__init__()
self.n_classes = n_classes
self.device = device
self.id2idx = {1: 0, 3: 1, 100: 2}
def forward(self, pred, targ):
'''
pred - tensor of shape anchors x n_classes
targ - tensor of shape anchors
'''
t = []
for clas_id in targ:
bg = [0] * self.n_classes
bg[self.id2idx[clas_id.item()]] = 1
t.append(bg)
t = torch.FloatTensor(t).to(self.device)
weight=self.get_weight(pred, t)
return torch.nn.functional.binary_cross_entropy_with_logits(pred, t, weight)
def get_weight(self, x, t):
alpha, gamma = 0.9, 2.
p = x.detach()
# confidence of prediction
pt = p*t + (1-p)*(1-t)
# non-background / background weight
w = alpha*t + (1-alpha)*(1-t)
# complete weighing factor
return w * ((1-pt).pow(gamma))
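    # Note (added): the weight above corresponds to the focal-loss modulation
    # alpha_t * (1 - p_t)^gamma (Lin et al.), applied per element to the BCE terms.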
def ssd_1_loss(pred_bbox, pred_class, gt_bbox, gt_class, anchors, grid_sizes, device):
# make network outputs same as gt bbox format
pred_bbox = actn_to_bb(pred_bbox, anchors, grid_sizes)
# compute IOU for obj x anchor
overlaps = jaccard(gt_bbox, anchors)
# map each anchor to the highest IOU obj, gt_idx - ids of mapped objects
matched_gt_bbox, matched_gt_class_ids, pos_idx = map_to_ground_truth(
overlaps, gt_bbox, gt_class)
loc_loss = ((pred_bbox[pos_idx] - matched_gt_bbox).abs()).mean()
loss_f = BCE_Loss(3, device)
class_loss = loss_f(pred_class, matched_gt_class_ids)
return loc_loss, class_loss
def ssd_loss(pred, targ, anchors, grid_sizes, device, params):
'''
args: pred - model output - two tensors of dim anchors x 4 and anchors x n_classes in a list
targ - ground truth - two tensors of dim #obj x 4 and #obj in a list
anchors will be mapped to overlapping GT bboxes, thus feature map cells corresponding to those anchors will have to predict those gt bboxes
'''
localization_loss, classification_loss = 0., 0.
# computes the loss for each image in the batch
for idx in range(len(pred[0])):
pred_bbox, pred_class = pred[0][idx], pred[1][idx]
gt_bbox, gt_class = targ[0][idx].to(device), targ[1][idx].to(device)
l_loss, c_loss = ssd_1_loss(pred_bbox, pred_class, gt_bbox,
gt_class, anchors, grid_sizes, device)
localization_loss += l_loss
classification_loss += c_loss
return localization_loss, classification_loss
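# Hedged usage sketch (added; `model`, `images` and `targets` are assumed to come
# from the surrounding training code, which is not part of this file):
# preds = model(images)  # ([batch, anchors, 4], [batch, anchors, n_classes])
# loc_loss, cls_loss = ssd_loss(preds, targets, anchors, grid_sizes, device, params)
# total_loss = loc_loss + cls_loss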
|
import pytest
from django.core.exceptions import ImproperlyConfigured
from esteid.constants import Languages
@pytest.mark.parametrize(
"lang_code, result",
[
*[(code.upper(), code) for code in Languages.ALL],
*[(code.lower(), code) for code in Languages.ALL],
*[(alpha2.upper(), code) for alpha2, code in Languages._MAP_ISO_639_1_TO_MID.items()],
*[(alpha2.lower(), code) for alpha2, code in Languages._MAP_ISO_639_1_TO_MID.items()],
# These tests duplicate the ones above, just for clarity here.
("et", "EST"),
("est", "EST"),
("ET", "EST"),
("EST", "EST"),
(None, ImproperlyConfigured("Language should be one of .+, got `None`")),
("whatever", ImproperlyConfigured("Language should be one of .+, got `whatever`")),
(object, ImproperlyConfigured("Language should be one of .+, got `<class 'object'>`")),
],
)
def test_languages_identify_language(lang_code, result):
if isinstance(result, Exception):
with pytest.raises(type(result), match=result.args[0]):
Languages.identify_language(lang_code)
else:
assert Languages.identify_language(lang_code) == result
|
# https://oj.leetcode.com/problems/spiral-matrix-ii/
class Solution:
# @return a list of lists of integer
def generateMatrix(self, n):
matrix = [[0]*n for i in xrange(n)]
next = 1
for k in xrange(n/2):
for j in xrange(k, n-1-k):
matrix[k][j] = next
next += 1
for i in xrange(k, n-1-k):
matrix[i][n-1-k] = next
next += 1
for j in xrange(k, n-1-k):
matrix[n-1-k][n-1-j] = next
next += 1
for i in xrange(k, n-1-k):
matrix[n-1-i][k] = next
next += 1
if n % 2 == 1:
matrix[n/2][n/2] = next
return matrix
s = Solution()
print s.generateMatrix(1)
print s.generateMatrix(2)
print s.generateMatrix(3)
|
#writing functions to be called from different notebooks, making the code easier to read
import os
import glob
import pandas as pd
#TODO create a nice function that saves a file summarizing the data outputs as graphs from cellprofiler
#def DataCheckPlotting():
def ImportData_NUC_CYTO(cp_output_dir):
#use glob to pull the common filename from the csv files, call this the experiment name
#import glob
#import os
exp_name = os.path.basename(glob.glob(cp_output_dir + '/*_Image.csv',recursive=True)[0])[:-10]
print(f'EXPERIMENT NAME: {exp_name}')
#create a folder to export data from the CNN
cnn_export_folder = f"_IMGexportforCNN_{exp_name}"
os.makedirs(cnn_export_folder, exist_ok=True)
#generate the other cellprofiler output filenames
nuc_csv = f"{exp_name}_NUC_DAPI.csv"
cyto_csv = f"{exp_name}_Cytoplasm.csv"
image_csv = f"{exp_name}_Image.csv"
print("\nIMPORTED AND MERGED THE FOLLOWING FILES:", nuc_csv, cyto_csv, image_csv, sep="\n - ")
#import these files as datafames using pandas
#import pandas as pd
#nucleus data
df_nuc = pd.read_csv(os.path.join(cp_output_dir, nuc_csv), na_filter=True)
df_nuc.set_index("ImageNumber", inplace=True)
#cytoplasm data
df_cyto = pd.read_csv(os.path.join(cp_output_dir, cyto_csv), na_filter=True)
df_cyto.set_index("ImageNumber", inplace=True)
#image info
df_image = pd.read_csv(os.path.join(cp_output_dir, image_csv), na_filter=True)
df_image.set_index("ImageNumber", inplace=True)
#then extract only the image urls from this
df_image_url = df_image.filter(regex=r'^URL_', axis=1) #this will select any columns starting with "URL_"
#combine these dataframes together
#merge nucleus data with urls
df_combined_data = df_nuc.merge(df_image_url, left_on='ImageNumber', right_on='ImageNumber', how='outer')
#merge this with cytoplasm data and differentiate columns from the two datasets as "_NUC" and "_CYTO"
df_combined_data = df_combined_data.merge(df_cyto, left_on=["ImageNumber", "ObjectNumber"], right_on=["ImageNumber", "Parent_NUC_DAPI"], how="outer", suffixes=('_NUC', '_CYTO'))
#we can also just look at the raw number of rows in the dataframe to see how many nuclei we've identified
print(f'\nDETECTED NUCLEI: {df_combined_data.shape[0]:,.0f}')
return(df_combined_data, df_image_url,exp_name);
def GenerateIDsandCoords(df_combined_data, Metadata_for_ID):
df_combined_data.reset_index(inplace=True)
df_combined_data.rename(columns={'ObjectNumber_NUC':'NUC_ID'}, inplace=True)
df_combined_data.dropna(subset=['NUC_ID'], inplace=True)
#conversion to string for concatenation
### CODE CAN BE IMPROVED ###
df_combined_data["NUC_ID"] = df_combined_data["NUC_ID"].astype(float).astype(int).astype(str)
df_combined_data["ImageNumber"] = df_combined_data["ImageNumber"].astype(float).astype(int).astype(str)
df_combined_data['Metadata_date_NUC'] = df_combined_data['Metadata_date_NUC'].astype(float).astype(int).astype(str)
df_combined_data['Metadata_experiment_NUC'] = df_combined_data['Metadata_experiment_NUC'].astype(float).astype(int).astype(str)
# TODO: if this is a string and not a number, then make it an float then int then string.
df_combined_data['Metadata_for_ID'] = df_combined_data[Metadata_for_ID].astype(float).astype(int).astype(str)
df_combined_data["Unique_ID"] = df_combined_data['Metadata_experiment_NUC'].str.cat(
df_combined_data[['Metadata_date_NUC', 'Metadata_biorep_NUC','Metadata_for_ID','ImageNumber','NUC_ID']]
, sep="_")
print(f'EXAMPLE IDS: {df_combined_data["Unique_ID"][1]}')
return(df_combined_data);
def IDsandCoords_IMGexport(df_merge_OUTPUT,C1,C2,C3,exp_name):
df_merge_OUTPUT['NUC_x0'] = df_merge_OUTPUT[('Location_Center_X_NUC')]*8 #correcting for downscaling
df_merge_OUTPUT['NUC_y0'] = df_merge_OUTPUT[('Location_Center_Y_NUC')]*8 #correcting for downscaling
df_merge_OUTPUT['CYTO_x0'] = df_merge_OUTPUT[('Location_CenterMassIntensity_X_gB_small')]*8 #correcting for downscaling
df_merge_OUTPUT['CYTO_y0'] = df_merge_OUTPUT[('Location_CenterMassIntensity_Y_gB_small')]*8 #correcting for downscaling
df_merge_OUTPUT['URL_C1'] = df_merge_OUTPUT[C1] #this will be red in imageJ
df_merge_OUTPUT['URL_C2'] = df_merge_OUTPUT[C2] #this will be green in imageJ
df_merge_OUTPUT['URL_C3'] = df_merge_OUTPUT[C3] #this will be blue in imageJ
df_merge_EXPORT= df_merge_OUTPUT[["Unique_ID", 'NUC_x0','NUC_y0','URL_C1','URL_C2','URL_C3','AreaShape_Orientation','CYTO_x0','CYTO_y0']]
df_merge_EXPORT.to_csv(f'{exp_name}_IDsandCoords.csv', index=False)
print(f'Exported Nucleus IDs and Coordinates into a csv named: {exp_name}_IDsandCoords.csv')
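# Hedged usage sketch from a notebook (added; the path, metadata key and URL
# column names below are illustrative assumptions, not values defined here):
# df, df_urls, exp_name = ImportData_NUC_CYTO('cellprofiler_output/')
# df = GenerateIDsandCoords(df, 'Metadata_well_NUC')
# IDsandCoords_IMGexport(df, 'URL_DAPI', 'URL_GFP', 'URL_RFP', exp_name)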
|
# http://flask.pocoo.org/docs/1.0/api/#api
# $ export FLASK_APP=flask_demo.py
# $ export FLASK_ENV=development
# $ flask run
# $ flask run -h <ip> -p <port>
# $ python -m flask run
from flask import Flask, request, jsonify, abort
app = Flask(__name__)
@app.errorhandler(400)
def bad_json(error):
return 'Error parsing JSON', 400
# no trailing slash, 404 if client includes
# if trailing slash and client does not include, redirect
@app.route('/hello')
def hello_world():
#print(f"client's user-agent: {request.headers.get('user-agent')}")
app.logger.debug(f"client's user-agent: {request.headers.get('user-agent')}")
return 'Hello, World!'
# variable rules- string, int, float, path, uuid
@app.route('/users/<int:user_id>', methods=['POST', 'GET'])
def show_user(user_id):
if request.method == 'POST':
if request.json is None:
abort(400)
else:
return jsonify(success=True), 201
else:
return jsonify(_id=user_id)
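# Example requests against the routes above (added for illustration):
# $ curl http://127.0.0.1:5000/hello
# $ curl http://127.0.0.1:5000/users/42
# $ curl -X POST -H "Content-Type: application/json" -d '{}' http://127.0.0.1:5000/users/42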
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The unregister-cluster command for removing clusters from the Hub."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container.hub import util as hub_util
from googlecloudsdk.command_lib.util.apis import arg_utils
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
class UnregisterCluster(base.DeleteCommand):
r"""Unregisters a cluster from Google Cloud Platform.
This command unregisters a cluster referenced from a kubeconfig file from
Google Cloud Platform. It also removes the Connect agent installation from the
Cluster.
## EXAMPLES
Unregister a cluster referenced from the default kubeconfig file:
$ {command} --context=my-cluster-context
Unregister a cluster referenced from a specific kubeconfig file:
$ {command} \
--kubeconfig-file=/home/user/custom_kubeconfig \
--context=my-cluster-context
"""
@classmethod
def Args(cls, parser):
hub_util.AddCommonArgs(parser)
def Run(self, args):
project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
kube_client = hub_util.KubernetesClient(args)
registered_project = hub_util.GetMembershipCROwnerID(kube_client)
authorized_projects = hub_util.UserAccessibleProjectIDSet()
if registered_project:
if registered_project not in authorized_projects:
raise exceptions.Error(
'The cluster is already registered to [{}], which you are not '
'authorized to access.'.format(registered_project))
elif registered_project != project:
raise exceptions.Error(
'This cluster is registered to another project [{}]. '
'Please unregister this cluster from the correct project:\n\n'
' gcloud {}container hub unregister-cluster --project {} --context {}'
.format(registered_project,
hub_util.ReleaseTrackCommandPrefix(self.ReleaseTrack()),
registered_project, args.context))
if project not in authorized_projects:
raise exceptions.Error(
'The project you are attempting to register with [{}] either '
'doesn\'t exist or you are not authorized to access it.'.format(
project))
uuid = hub_util.GetClusterUUID(kube_client)
try:
registered_membership_project = hub_util.ProjectForClusterUUID(
uuid, [project, registered_project])
except apitools_exceptions.HttpNotFoundError as e:
raise exceptions.Error(
'Could not access Memberships API. Is your project whitelisted for '
'API access? Underlying error: {}'.format(e))
if registered_membership_project and project != registered_membership_project:
raise exceptions.Error(
'This cluster is registered to another project [{}]. '
'Please unregister this cluster from the appropriate project:\n\n'
' gcloud {}container hub unregister-cluster --project {} --context {}'
.format(registered_membership_project,
hub_util.ReleaseTrackCommandPrefix(self.ReleaseTrack()),
registered_membership_project, args.context))
if not registered_membership_project:
log.status.Print(
'Membership for [{}] was not found. It may already have been '
'deleted, or it may never have existed. You can safely run the '
'`register-cluster` command again for this cluster.'.format(
args.context))
# There is no Membership for this cluster, but there is a Membership CR.
# We can safely remove the Membership CR, so users can register to another
# hub without issue.
if registered_project:
hub_util.DeleteMembershipResources(kube_client)
return
hub_util.DeleteConnectNamespace(args)
try:
name = 'projects/{}/locations/global/memberships/{}'.format(project, uuid)
hub_util.DeleteMembership(name)
hub_util.DeleteMembershipResources(kube_client)
except apitools_exceptions.HttpUnauthorizedError as e:
raise exceptions.Error(
'You are not authorized to unregister clusters from project [{}]. '
'Underlying error: {}'.format(project, e))
|
import pandas as pd
import geojson as gj
import plotly.express as px
import urllib.request
import dash
# from dash import dcc
# from dash import html
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash(__name__)
server = app.server
# # # # # DATA # # # # # #
# loading dataframe
# pip freeze > requirements.txt
# loading data
data_path = 'https://github.com/sebastian-konicz/covid-dashboard/raw/main/data/interim/vaccination_data/vaccinations_county_20211003.xlsx'
data = pd.read_excel(data_path, engine='openpyxl')
# restricting dataframe
data = data[['teryt', 'powiat', '%_zaszczepieni']]
# reshaping teryt
data['teryt'] = data['teryt'].apply(lambda x: str(x).zfill(4))
# loading geojson
jsonurl = 'https://github.com/sebastian-konicz/covid-dashboard/raw/main/data/interim/geo/geo_county.geojson'
with urllib.request.urlopen(jsonurl) as url:
geojson = gj.load(url)
# with open(jsonurl) as file:
# geojson = gj.load(file)
# geojson = gj.load(geo_path)
# get the maximum value to cap displayed values
max_log = data['%_zaszczepieni'].max()
min_val = data['%_zaszczepieni'].min()
max_val = int(max_log) + 1
fig = px.choropleth_mapbox(data,
geojson=geojson,
featureidkey='properties.JPT_KOD_JE',
locations='teryt',
color='%_zaszczepieni',
color_continuous_scale=px.colors.diverging.RdBu,
range_color=(min_val, max_val),
mapbox_style="carto-positron",
zoom=5, center={"lat": 52, "lon": 19},
opacity=0.5,
)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
fig.show()
# # # # # # LAYOUT # # # # # #
app.layout = html.Div([
html.H1('Mapa szczepień na COVID-19 w Polsce ',
style={'textAlign': 'center'}),
dcc.Graph(
id='example-map',
figure=fig
),
])
if __name__ == '__main__':
app.run_server(debug=True)
|
import numpy as np
print('------------------Aufgabe 1-----------------------')
# structured array with 2 columns (ID int32) and product price
my_type = [('ID', np.int32), ('Preis', np.float64)]
produkte = np.array([(12345, 670.89),
(34567, 18.99),
(78900, 250.00),
(13579, 999.99)], dtype=my_type)
# print the product IDs
print(produkte['ID'])
# print the first row
print(produkte[0])
# print the article price in the 3rd row
print(produkte[2]['Preis'])
print('------------------Aufgabe 2-----------------------')
# create a new array
verkaufszahlen = np.array([3, 5, 2, 1])
# compute the revenue for each product
erlöse = produkte['Preis'] * verkaufszahlen
# output
print('Erlöse pro Produkt : ', erlöse)
print('Gesamterlös : ', erlöse.sum())
print('------------------Aufgabe 3,4 und 5-----------------------')
time_temp_type = np.dtype([('time', [('h',int), ('min',int), ('sec',int)]),
('temperature',float)])
# fill the array and assign the structure
time_temp=np.array( [((11,42,17),20.8),
((13,19,3),23.2),
((14,50,29),24.6)],dtype=time_temp_type)
print(time_temp)
print(time_temp['time'])
print(time_temp['time']['h'])
print(time_temp['temperature'])
with open("time_temp.csv","w") as fh:
for row in time_temp:
zeit = [f"{el:02d}" for el in row[0]]
zeit = ":".join(zeit)
fh.write(zeit+" "+str(row[1])+"\n")
|
import sed_eval
import utils
import pandas as pd
from sklearn.preprocessing import binarize, MultiLabelBinarizer
import sklearn.metrics as skmetrics
import numpy as np
def get_audio_tagging_df(df):
return df.groupby('filename')['event_label'].unique().reset_index()
def audio_tagging_results(reference, estimated):
"""audio_tagging_results. Returns clip-level F1 Scores
:param reference: The ground truth dataframe as pd.DataFrame
:param estimated: Predicted labels by the model ( thresholded )
"""
if "event_label" in reference.columns:
classes = reference.event_label.dropna().unique().tolist(
) + estimated.event_label.dropna().unique().tolist()
encoder = MultiLabelBinarizer().fit([classes])
reference = get_audio_tagging_df(reference)
estimated = get_audio_tagging_df(estimated)
ref_labels, _ = utils.encode_labels(reference['event_label'],
encoder=encoder)
reference['event_label'] = ref_labels.tolist()
est_labels, _ = utils.encode_labels(estimated['event_label'],
encoder=encoder)
estimated['event_label'] = est_labels.tolist()
matching = reference.merge(estimated,
how='outer',
on="filename",
suffixes=["_ref", "_pred"])
def na_values(val):
if type(val) is np.ndarray:
return val
elif isinstance(val, list):
return np.array(val)
if pd.isna(val):
return np.zeros(len(encoder.classes_))
return val
ret_df = pd.DataFrame(columns=['label', 'f1', 'precision', 'recall'])
if not estimated.empty:
matching['event_label_pred'] = matching.event_label_pred.apply(
na_values)
matching['event_label_ref'] = matching.event_label_ref.apply(na_values)
y_true = np.vstack(matching['event_label_ref'].values)
y_pred = np.vstack(matching['event_label_pred'].values)
ret_df.loc[:, 'label'] = encoder.classes_
for avg in [None, 'macro', 'micro']:
avg_f1 = skmetrics.f1_score(y_true, y_pred, average=avg)
avg_pre = skmetrics.precision_score(y_true, y_pred, average=avg)
avg_rec = skmetrics.recall_score(y_true, y_pred, average=avg)
# avg_auc = skmetrics.roc_auc_score(y_true, y_pred, average=avg)
if avg is None:
# Add for each label non pooled stats
ret_df.loc[:, 'precision'] = avg_pre
ret_df.loc[:, 'recall'] = avg_rec
ret_df.loc[:, 'f1'] = avg_f1
# ret_df.loc[:, 'AUC'] = avg_auc
else:
# Append macro and micro results in last 2 rows
ret_df = ret_df.append(
{
'label': avg,
'precision': avg_pre,
'recall': avg_rec,
'f1': avg_f1,
# 'AUC': avg_auc
},
ignore_index=True)
return ret_df
def get_event_list_current_file(df, fname):
"""
Get list of events for a given filename
:param df: pd.DataFrame, the dataframe to search on
:param fname: the filename to extract the value from the dataframe
:return: list of events (dictionaries) for the given filename
"""
event_file = df[df["filename"] == fname]
if len(event_file) == 1:
if pd.isna(event_file["event_label"].iloc[0]):
event_list_for_current_file = [{"filename": fname}]
else:
event_list_for_current_file = event_file.to_dict('records')
else:
event_list_for_current_file = event_file.to_dict('records')
return event_list_for_current_file
def event_based_evaluation_df(reference,
estimated,
t_collar=0.200,
percentage_of_length=0.2):
"""
Calculate EventBasedMetric given a reference and estimated dataframe
:param reference: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
reference events
:param estimated: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
estimated events to be compared with reference
:return: sed_eval.sound_event.EventBasedMetrics with the scores
"""
evaluated_files = reference["filename"].unique()
classes = []
classes.extend(reference.event_label.dropna().unique())
classes.extend(estimated.event_label.dropna().unique())
classes = list(set(classes))
event_based_metric = sed_eval.sound_event.EventBasedMetrics(
event_label_list=classes,
t_collar=t_collar,
percentage_of_length=percentage_of_length,
empty_system_output_handling='zero_score')
for fname in evaluated_files:
reference_event_list_for_current_file = get_event_list_current_file(
reference, fname)
estimated_event_list_for_current_file = get_event_list_current_file(
estimated, fname)
event_based_metric.evaluate(
reference_event_list=reference_event_list_for_current_file,
estimated_event_list=estimated_event_list_for_current_file,
)
return event_based_metric
def segment_based_evaluation_df(reference, estimated, time_resolution=1.):
evaluated_files = reference["filename"].unique()
classes = []
classes.extend(reference.event_label.dropna().unique())
classes.extend(estimated.event_label.dropna().unique())
classes = list(set(classes))
segment_based_metric = sed_eval.sound_event.SegmentBasedMetrics(
event_label_list=classes, time_resolution=time_resolution)
for fname in evaluated_files:
reference_event_list_for_current_file = get_event_list_current_file(
reference, fname)
estimated_event_list_for_current_file = get_event_list_current_file(
estimated, fname)
segment_based_metric.evaluate(
reference_event_list=reference_event_list_for_current_file,
estimated_event_list=estimated_event_list_for_current_file)
return segment_based_metric
def compute_metrics(valid_df, pred_df, time_resolution=1.):
metric_event = event_based_evaluation_df(valid_df,
pred_df,
t_collar=0.200,
percentage_of_length=0.2)
metric_segment = segment_based_evaluation_df(
valid_df, pred_df, time_resolution=time_resolution)
return metric_event, metric_segment
def roc(y_true, y_pred, average=None):
return skmetrics.roc_auc_score(y_true, y_pred, average=average)
def mAP(y_true, y_pred, average=None):
return skmetrics.average_precision_score(y_true, y_pred, average=average)
def precision_recall_fscore_support(y_true, y_pred, average=None):
return skmetrics.precision_recall_fscore_support(y_true,
y_pred,
average=average)
def tpr_fpr(y_true, y_pred):
fpr, tpr, thresholds = skmetrics.roc_curve(y_true, y_pred)
return fpr, tpr, thresholds
def obtain_error_rates_alt(y_true, y_pred, threshold=0.5):
speech_frame_predictions = binarize(y_pred.reshape(-1, 1),
threshold=threshold)
tn, fp, fn, tp = skmetrics.confusion_matrix(
y_true, speech_frame_predictions).ravel()
p_miss = 100 * (fn / (fn + tp))
p_fa = 100 * (fp / (fp + tn))
return p_fa, p_miss
def confusion_matrix(y_true, y_pred):
return skmetrics.confusion_matrix(y_true, y_pred)
def obtain_error_rates(y_true, y_pred, threshold=0.5):
negatives = y_pred[np.where(y_true == 0)]
positives = y_pred[np.where(y_true == 1)]
Pfa = np.sum(negatives >= threshold) / negatives.size
Pmiss = np.sum(positives < threshold) / positives.size
return Pfa, Pmiss
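if __name__ == '__main__':
    # Minimal self-check (added as an illustration, not part of the original):
    # with a 0.5 threshold, one false alarm out of two negatives and one miss
    # out of two positives give Pfa = Pmiss = 0.5.
    y_true = np.array([0, 0, 1, 1])
    y_pred = np.array([0.2, 0.7, 0.4, 0.9])
    print(obtain_error_rates(y_true, y_pred, threshold=0.5))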
|
"""
Support for Meteobridge SmartEmbed
This component will read the local weatherstation data
and create Binary sensors for each type defined below.
For a full description, go here: https://github.com/briis/hass-mbweather
Author: Bjarne Riis
"""
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA,
BinarySensorDevice,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_MONITORED_CONDITIONS, CONF_NAME
from homeassistant.util import slugify
from . import MBDATA
from .const import (
DOMAIN,
DEFAULT_ATTRIBUTION,
ENTITY_ID_BINARY_SENSOR_FORMAT,
ENTITY_UNIQUE_ID,
)
DEPENDENCIES = ["mbweather"]
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"raining": ["Raining", None, "mdi:water", "mdi:water-off"],
"lowbattery": ["Battery Status", None, "mdi:battery-10", "mdi:battery"],
"freezing": ["Freezing", None, "mdi:thermometer-minus", "mdi:thermometer-plus"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, _discovery_info=None):
"""Set up the MBWeather binary sensor platform."""
coordinator = hass.data[MBDATA]["coordinator"]
if not coordinator.data:
return
name = slugify(config.get(CONF_NAME))
sensors = []
for sensor in config[CONF_MONITORED_CONDITIONS]:
sensors.append(MBweatherBinarySensor(coordinator, sensor, name))
_LOGGER.debug("Binary ensor added: %s", sensor)
async_add_entities(sensors, True)
class MBweatherBinarySensor(BinarySensorDevice):
""" Implementation of a MBWeather Binary Sensor. """
def __init__(self, coordinator, sensor, name):
"""Initialize the sensor."""
self.coordinator = coordinator
self._sensor = sensor
self._device_class = SENSOR_TYPES[self._sensor][1]
self.entity_id = ENTITY_ID_BINARY_SENSOR_FORMAT.format(self._sensor)
self._name = SENSOR_TYPES[self._sensor][0]
self._unique_id = ENTITY_UNIQUE_ID.format(slugify(self._name).replace(" ", "_"))
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self.coordinator.data[self._sensor] is True
@property
def icon(self):
"""Icon to use in the frontend."""
return (
SENSOR_TYPES[self._sensor][2]
if self.coordinator.data[self._sensor]
else SENSOR_TYPES[self._sensor][3]
)
@property
def device_class(self):
"""Return the device class of the sensor."""
return SENSOR_TYPES[self._sensor][1]
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
attr[ATTR_ATTRIBUTION] = DEFAULT_ATTRIBUTION
return attr
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.coordinator.async_add_listener(self.async_write_ha_state)
async def async_will_remove_from_hass(self):
"""When entity will be removed from hass."""
self.coordinator.async_remove_listener(self.async_write_ha_state)
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import time
import random
from decimal import *
getcontext().prec = 30
def addDot(num):
"""Formats the number into a string and adds a '.' for every thousand (eg. 3000 -> 3.000)
Parameters
----------
num : int
integer number to format
Returns
-------
number : str
a string representing that number with added dots for every thousand
"""
return '{0:,}'.format(int(num)).replace(',','.')
def daysHoursMinutes(totalSeconds):
"""Formats the total number of seconds into days hours minutes (eg. 321454 -> 3D 17H)
Parameters
----------
totalSeconds : int
total number of seconds
Returns
-------
text : str
formatted string (D H M)
"""
if totalSeconds == 0:
return '0 s'
dias = int(totalSeconds / Decimal(86400))
totalSeconds -= dias * Decimal(86400)
horas = int(totalSeconds / Decimal(3600))
totalSeconds -= horas * Decimal(3600)
minutos = int(totalSeconds / Decimal(60))
texto = ''
if dias > 0:
texto = str(dias) + 'D '
if horas > 0:
texto = texto + str(horas) + 'H '
if minutos > 0 and dias == 0:
texto = texto + str(minutos) + 'M '
return texto[:-1]
def wait(seconds, maxrandom = 0):
"""This function will wait the provided number of seconds plus a random number of seconds between 0 and maxrandom
Parameters
-----------
seconds : int
the number of seconds to wait for
maxrandom : int
the maximum number of additional seconds to wait for
"""
randomTime = random.randint(0, maxrandom)
if seconds <= 0:
return
ratio = (1 + 5 ** 0.5) / 2 - 1 # 0.6180339887498949
comienzo = time.time()
fin = comienzo + seconds
restantes = seconds
while restantes > 0:
time.sleep(restantes * ratio)
restantes = fin - time.time()
time.sleep(randomTime)
def getCurrentCityId(session):
"""
Parameters
----------
session : ikabot.web.session.Session
"""
html = session.get()
return re.search(r'currentCityId:\s(\d+),', html).group(1)
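if __name__ == '__main__':
    # Small self-checks (added as an illustration); the second value matches the
    # example given in the daysHoursMinutes docstring.
    assert addDot(3000) == '3.000'
    assert daysHoursMinutes(321454) == '3D 17H'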
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import sys, os
import numpy as np
import cv2
# This function is modeled off of P/R/F measure as described by Dave et al. (arXiv19)
def multilabel_metrics(prediction, gt, num_classes):
""" Computes F-Measure, Precision, Recall, IoU, #objects detected, #confident objects detected, #GT objects.
It computes these measures only of objects, not background (0)/table (1).
Uses the Hungarian algorithm to match predicted masks with ground truth masks.
A "confident object" is an object that is predicted with more than 0.75 F-measure
@param gt: a [H x W] numpy.ndarray with ground truth masks
@param prediction: a [H x W] numpy.ndarray with predicted masks
@return: a dictionary with the metrics
"""
precisions = np.zeros((num_classes, ), dtype=np.float32)
recalls = np.zeros((num_classes, ), dtype=np.float32)
f1s = np.zeros((num_classes, ), dtype=np.float32)
count = np.zeros((num_classes, ), dtype=np.float32)
# for each class
for cls in range(num_classes):
gt_mask = (gt == cls)
pred_mask = (prediction == cls)
A = np.logical_and(pred_mask, gt_mask)
count_true = np.count_nonzero(A)
count_pred = np.count_nonzero(pred_mask)
count_gt = np.count_nonzero(gt_mask)
# precision
if count_pred > 0:
precisions[cls] = float(count_true) / float(count_pred)
# recall
if count_gt > 0:
recalls[cls] = float(count_true) / float(count_gt)
count[cls] = 1
# F-measure
if precisions[cls] + recalls[cls] != 0:
f1s[cls] = (2 * precisions[cls] * recalls[cls]) / (precisions[cls] + recalls[cls])
return {'F-measure' : f1s,
'Precision' : precisions,
'Recall' : recalls,
'Count': count}
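if __name__ == '__main__':
    # Minimal illustration (added): a prediction identical to the ground truth
    # yields precision = recall = F1 = 1 for every class present in the image.
    gt = np.array([[0, 1], [1, 2]])
    pred = np.array([[0, 1], [1, 2]])
    print(multilabel_metrics(pred, gt, num_classes=3))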
|
# -*- coding: utf-8 -*-
"""Unit test package for talklocal_python."""
|
"""A wrapper class that will hold the column values and recreate a model instance when needed."""
class ModelWrapper:
def __init__(self, model_instance):
self._model_instance = model_instance
for key in self._model_instance.__table__.columns.keys():
this_val = getattr(self._model_instance, key)
setattr(self, key, this_val)
def reconnect_model(self):
return self._model_instance
if __name__ == '__main__':
import sqlalchemy as sqla
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, declarative_base
DATABASE_URI = 'sqlite:///:memory:'
engine = create_engine(DATABASE_URI, pool_pre_ping=True)
Base = declarative_base()
class DummyModel(Base):
__tablename__ = '__dummy_table__'
id_ = sqla.Column(primary_key=True, type_=sqla.Integer)
data = sqla.Column(sqla.TEXT)
Base.metadata.create_all(engine)
# original session: create a record, save it to the database, use the ModelWrapper
sesmkr = sessionmaker(bind=engine)
with sesmkr() as session0:
dm0 = DummyModel()
session0.add(dm0)
session0.commit()
wrp0 = ModelWrapper(dm0)
pass # at this point, because of the wrp0(why?), dm0 is detached but accessible
print(f'dm0 detached state: {dm0._sa_instance_state.detached}')
# reattach to new session
with sesmkr() as session1:
dm0.data = 'words'
session1.add(dm0)
session1.commit()
# wrp1 = ModelWrapper(dm0) # without this, will get detached instance error accessing columns
dumdum = dm0 # this is not sufficient
'''sqlalchemy.orm.exc.DetachedInstanceError: Instance <DummyModel at 0x1a7f6e5a550> is not bound to a Session;
attribute refresh operation cannot proceed (Background on this error at: https://sqlalche.me/e/14/bhk3)'''
pass # at this point, since wrp1 is commented out, dm0 is detached but not accessible
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 6 18:29:13 2020
@author: Goutam Dadhich
"""
import sys
INT_MAX = sys.maxsize
# print(INT_MAX)
# Finding Minimum Coins using Recursion
def FindMinCoins_rec(coins, n, amt):
if amt == 0:
return 0
if amt < 0:
return INT_MAX
count = INT_MAX
for i in range(0, n):
res = FindMinCoins_rec(coins, n, amt-coins[i])
if res != INT_MAX:
count = min(res+1, count)
return count
# Finding Minimum Coins using Dynamic programming
def FindMinCoins_dp(coins, n, amt):
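    # T[i] holds the minimum number of coins needed for amount i;
    # recurrence: T[i] = min over coins c (T[i - c] + 1) for c <= i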
T = [INT_MAX]*(amt+1)
T[0] = 0
for i in range(1, amt+1):
res = INT_MAX
for j in range(0, n):
if i-coins[j] >= 0:
res = T[i - coins[j]]
if res != INT_MAX:
T[i] = min(res+1, T[i])
return T[amt]
if __name__ == '__main__':
coins = [1, 2, 3, 5, 7]
n = len(coins)
amt = 15
print('-'*10 + '*'*5 + '-'*10)
print('Using Recursion:- ')
print('Minimum coins required :- ', FindMinCoins_rec(coins, n, amt))
print('-'*10 + '*'*5 + '-'*10)
print('Using Dynamic Programming:- ')
print('Minimum coins required :- ', FindMinCoins_dp(coins, n, amt))
print('-'*10 + '*'*5 + '-'*10)
|
resp = 's'
cont = soma = 0
numbers = []
while resp == 's':
num = int(input('digite um número: '))
cont += 1
soma += num
numbers.append(num)
resp = input('Deseja continuar? [s/n]').lower().strip()
print(f'Você digitou {cont} números\n'
f'A média deles é {soma/cont}\n'
f'O maior número é o {max(numbers)}\n'
f'O menor número é o {min(numbers)}\n')
|
import streamlit as st
from is_even_nn import *
st.set_page_config(layout="centered", page_icon="🤖", page_title="Is it even?")
st.title("IsEvenNN")
st.write("A neural network that predicts if a number is even!")
left, right = st.columns(2)
form = left.form("input")
number_input = form.text_input("Your number:")
submit = form.form_submit_button("Submit number")
if submit:
evenNN = IsEvenNN()
evenNN.net.load_state_dict(torch.load("isEvenModel.pt", map_location=torch.device("cpu")))
try:
res, conf = evenNN.predict_single(number_input)
message = f'{number_input} is {"" if res else "not "}even! (confidence {conf:.3f})'
if res:
right.success(message)
else:
right.error(message)
except IndexError as ie:
right.error(f"Failed to process \"{number_input}\" ({ie.__class__.__name__}: {ie})")
except ValueError as ve:
right.error(f"Failed to process \"{number_input}\" ({ve.__class__.__name__}: {ve})")
except Exception as e:
right.error(f"Failed to process \"{number_input}\" (Unknown Error)")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('estore', '0003_auto_20151029_0013'),
]
operations = [
migrations.RenameField(
model_name='event',
old_name='detail',
new_name='event_detail',
),
migrations.AddField(
model_name='event',
name='event_short',
field=models.CharField(default=b'', max_length=300),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='map_url',
field=models.CharField(default=b'invalid', max_length=200),
preserve_default=True,
),
]
|
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt, time_diff_in_hours, get_datetime, getdate, today, cint, add_days, get_link_to_form
from frappe import _
from frappe.utils.xlsxutils import make_xlsx
import json
@frappe.whitelist()
def send_statements():
email_list = frappe.db.get_values('Send Customer Statements Customers', {'parent': 'Send Customer Statements'}, ['customer_name','email','send_statement'])
for customer_name, email, send_statement in email_list:
if email is not None:
if send_statement == "Yes":
data = get_report_content(customer_name)
if not data:
return
attachments = [{
'fname': get_file_name(),
'fcontent': data
}]
frappe.sendmail(
recipients = email,
subject = "Customer Statement from Simply Garlic",
message = "Good day. <br> Please find attached your latest statement from Simply Garlic",
attachments = attachments,
reference_doctype = "Report",
reference_name="General Ledger"
)
frappe.msgprint('Emails queued for sending')
def get_report_content(customer_name):
'''Returns file in for the report in given format'''
# Borrowed code from frappe/email/doctype/auto_email_report/auto_email_report.py
report = frappe.get_doc('Report', "General Ledger")
custom_filter = {'company': "Horizon Global SA (Pty) Ltd", 'party_type': "Customer", 'party': customer_name, 'from_date': add_days(today(), -7),'to_date': today(), 'group_by': "Group by Voucher (Consolidated)"}
columns, data = report.get_data(limit=100, user="Administrator", filters=custom_filter, as_dict=True)
# add serial numbers
columns.insert(0, frappe._dict(fieldname='idx', label='', width='30px'))
for i in range(len(data)):
data[i]['idx'] = i+1
# For .xlsx
spreadsheet_data = get_spreadsheet_data(columns, data)
xlsx_file = make_xlsx(spreadsheet_data, "Auto Email Report")
return xlsx_file.getvalue()
# For .html
# columns, data = make_links(columns, data)
# return self.get_html_table(columns, data) <-------------Fails here....... see auto_email_report.py
def get_spreadsheet_data(columns, data):
out = [[_(df.label) for df in columns], ]
for row in data:
new_row = []
out.append(new_row)
for df in columns:
if df.fieldname not in row: continue
new_row.append(frappe.format(row[df.fieldname], df, row))
return out
def get_file_name():
return "{0}.{1}".format("Customer Statement".replace(" ", "-").replace("/", "-"), "xlsx")
# return "{0}.{1}".format("Customer Statement".replace(" ", "-").replace("/", "-"), "html")
# def make_links(columns, data):
# for row in data:
# for col in columns:
# if col.fieldtype == "Link" and col.options != "Currency":
# if col.options and row.get(col.fieldname):
# row[col.fieldname] = get_link_to_form(col.options, row[col.fieldname])
# elif col.fieldtype == "Dynamic Link":
# if col.options and row.get(col.fieldname) and row.get(col.options):
# row[col.fieldname] = get_link_to_form(row[col.options], row[col.fieldname])
#
# return columns, data
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechDataServiceBlockchainAccountQueryModel(object):
def __init__(self):
self._account_hash = None
self._account_status = None
self._at_tenant_name = None
self._block_chain_id = None
self._block_hash = None
self._end_timestamp = None
self._page_no = None
self._page_size = None
self._start_timestamp = None
@property
def account_hash(self):
return self._account_hash
@account_hash.setter
def account_hash(self, value):
self._account_hash = value
@property
def account_status(self):
return self._account_status
@account_status.setter
def account_status(self, value):
self._account_status = value
@property
def at_tenant_name(self):
return self._at_tenant_name
@at_tenant_name.setter
def at_tenant_name(self, value):
self._at_tenant_name = value
@property
def block_chain_id(self):
return self._block_chain_id
@block_chain_id.setter
def block_chain_id(self, value):
self._block_chain_id = value
@property
def block_hash(self):
return self._block_hash
@block_hash.setter
def block_hash(self, value):
self._block_hash = value
@property
def end_timestamp(self):
return self._end_timestamp
@end_timestamp.setter
def end_timestamp(self, value):
self._end_timestamp = value
@property
def page_no(self):
return self._page_no
@page_no.setter
def page_no(self, value):
self._page_no = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def start_timestamp(self):
return self._start_timestamp
@start_timestamp.setter
def start_timestamp(self, value):
self._start_timestamp = value
def to_alipay_dict(self):
params = dict()
if self.account_hash:
if hasattr(self.account_hash, 'to_alipay_dict'):
params['account_hash'] = self.account_hash.to_alipay_dict()
else:
params['account_hash'] = self.account_hash
if self.account_status:
if hasattr(self.account_status, 'to_alipay_dict'):
params['account_status'] = self.account_status.to_alipay_dict()
else:
params['account_status'] = self.account_status
if self.at_tenant_name:
if hasattr(self.at_tenant_name, 'to_alipay_dict'):
params['at_tenant_name'] = self.at_tenant_name.to_alipay_dict()
else:
params['at_tenant_name'] = self.at_tenant_name
if self.block_chain_id:
if hasattr(self.block_chain_id, 'to_alipay_dict'):
params['block_chain_id'] = self.block_chain_id.to_alipay_dict()
else:
params['block_chain_id'] = self.block_chain_id
if self.block_hash:
if hasattr(self.block_hash, 'to_alipay_dict'):
params['block_hash'] = self.block_hash.to_alipay_dict()
else:
params['block_hash'] = self.block_hash
if self.end_timestamp:
if hasattr(self.end_timestamp, 'to_alipay_dict'):
params['end_timestamp'] = self.end_timestamp.to_alipay_dict()
else:
params['end_timestamp'] = self.end_timestamp
if self.page_no:
if hasattr(self.page_no, 'to_alipay_dict'):
params['page_no'] = self.page_no.to_alipay_dict()
else:
params['page_no'] = self.page_no
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
if self.start_timestamp:
if hasattr(self.start_timestamp, 'to_alipay_dict'):
params['start_timestamp'] = self.start_timestamp.to_alipay_dict()
else:
params['start_timestamp'] = self.start_timestamp
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AnttechDataServiceBlockchainAccountQueryModel()
if 'account_hash' in d:
o.account_hash = d['account_hash']
if 'account_status' in d:
o.account_status = d['account_status']
if 'at_tenant_name' in d:
o.at_tenant_name = d['at_tenant_name']
if 'block_chain_id' in d:
o.block_chain_id = d['block_chain_id']
if 'block_hash' in d:
o.block_hash = d['block_hash']
if 'end_timestamp' in d:
o.end_timestamp = d['end_timestamp']
if 'page_no' in d:
o.page_no = d['page_no']
if 'page_size' in d:
o.page_size = d['page_size']
if 'start_timestamp' in d:
o.start_timestamp = d['start_timestamp']
return o
|
def minimum_bribes(q):
    """Return the minimum number of bribes that produced the final queue `q`
    (1-indexed), or None if anyone moved more than two places ahead of where
    they started."""
    q = [p - 1 for p in q]  # convert to 0-indexed original positions
    bribes = 0
    for i, p in enumerate(q):
        if p - i > 2:
            return None  # moved forward more than twice: impossible with bribes
        # everyone who overtook person p now stands between positions p-1 and i-1
        for j in range(max(p - 1, 0), i):
            if q[j] > p:
                bribes += 1
    return bribes
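# Added usage sketch (not part of the original snippet): `q` is the final queue,
# 1-indexed, and None signals that someone moved forward more than two places, e.g.
#   minimum_bribes([2, 1, 5, 3, 4]) == 3
#   minimum_bribes([2, 5, 1, 3, 4]) is None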
|
import collections
import math
import logging
import torch
import copy
import numpy as np
from naslib.optimizers.core.metaclasses import MetaOptimizer
from naslib.search_spaces.core.query_metrics import Metric
from naslib.utils.utils import AttrDict, count_parameters_in_MB
from naslib.utils.logging import log_every_n_seconds
logger = logging.getLogger(__name__)
class DEHB(MetaOptimizer):
# training the models is not implemented
using_step_function = False
def __init__(self, config):
super().__init__()
# Hyperband related stuff
self.config = config
self.rounds = []
self.round_sizes = []
self.fidelities = []
if self.config.search_space in ["nasbench201", "nasbench211"]:
self.max_budget = 512 # SH according to cosine decay
self.min_budget = 12
self.eta = 3 # defines downsampling rate (default=3)
self.enc_dim = 6
self.max_mutations = 1
self.crossover_prob = 0.5
self.top_n_percent = 0.3
self.mutate_prob = 0.1
elif self.config.search_space == "nasbench101":
self.max_budget = 108
self.min_budget = 4
self.eta = 3 # defines downsampling rate (default=3)
self.enc_dim = 26
self.max_mutations = 1
self.crossover_prob = 0.5
self.top_n_percent = 0.3
self.mutate_prob = 0.1
elif self.config.search_space == "darts":
self.max_budget = 98
self.min_budget = 4
self.eta = 3 # defines downsampling rate (default=3)
self.enc_dim = 32
self.max_mutations = 5
self.crossover_prob = 0.5
self.top_n_percent = 0.3
self.mutate_prob = 0.1
elif self.config.search_space == "nlp":
self.max_budget = 50
self.min_budget = 3
self.eta = 3 # defines downsampling rate (default=3)
self.enc_dim = 185
self.max_mutations = 8
self.crossover_prob = 0.8
self.top_n_percent = 0.3
self.mutate_prob = 0.2
else:
raise NotImplementedError()
self.de = dict()
self._epsilon = 1e-6
self.pop_size = {}
self.counter = 0
s_max = math.floor(math.log(self.max_budget / self.min_budget, self.eta) + self._epsilon)
# set up round sizes, fidelities, and list of arches
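        # Illustrative schedule (added note; assumes the nasbench101 settings above
        # and ignores the config.search.fidelity cap): max_budget=108, min_budget=4,
        # eta=3 give s_max=3, so the brackets are
        #   s=3: 27 archs @ 4 epochs -> 9 @ 12 -> 3 @ 36 -> 1 @ 108
        #   s=2: 12 @ 12 -> 4 @ 36 -> 1 @ 108
        #   s=1:  6 @ 36 -> 2 @ 108
        #   s=0:  4 @ 108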
for s in reversed(range(s_max + 1)):
self.rounds.append(s)
round_sizes = []
fidelities = []
n = math.ceil((s_max + 1) * self.eta ** s / (s + 1) - self._epsilon) # initial number of configurations
r = self.max_budget / self.eta**s # initial number of iterations to run configurations for
for i in range(s + 1):
n_i = math.floor(n / self.eta ** i + self._epsilon)
r_i = min(math.floor(r * self.eta ** i + self._epsilon), config.search.fidelity)
round_sizes.append(n_i)
fidelities.append(r_i)
self.pop_size[r_i] = self.pop_size.get(r_i, 0) + n_i
self.round_sizes.append(round_sizes)
self.fidelities.append(fidelities)
for budget in self.fidelities[0][1:]:
budget = min(budget, config.search.fidelity)
self.de[budget] = {}
self.de[budget]['promotions'] = collections.deque(maxlen=100)
self.performance_metric = Metric.VAL_ACCURACY
self.dataset = config.dataset
self.history = torch.nn.ModuleList()
self.epochs = self.compute_epochs()
self.current_round = []
self.next_round = []
self.round_number = 0
self.prev_round = 0
self.counter = 0
self.process = 0
def adapt_search_space(self, search_space, scope=None, dataset_api=None):
        assert search_space.QUERYABLE, "DEHB is currently only implemented for queryable benchmarks."
self.search_space = search_space.clone()
self.scope = scope if scope else search_space.OPTIMIZER_SCOPE
self.dataset_api = dataset_api
self.max_training_epoch = self.search_space.get_max_epochs()
def compute_epochs(self):
return self.round_sizes, self.rounds[::-1]
def new_epoch(self, epoch, round, i):
if self.process < i: # re-init for each new process
self.current_round = []
self.next_round = []
self.round_number = 0
self.prev_round = 0
self.counter = 0
self.process = i
if self.prev_round < round: # reset round_number for each new round
self.prev_round = round
self.round_number = 0
if epoch < self.round_sizes[round][0]:
# sample random architectures
model = torch.nn.Module() # hacky way to get arch and accuracy checkpointable
model.arch = self.search_space.clone()
budget = self.fidelities[round][0]
if round == 0:
model.arch.sample_random_architecture(dataset_api=self.dataset_api)
else:
if len(self.de[budget]['promotions']) > 0:
print('promotion from budget: {}, length: {}'.format(budget, len(self.de[budget]['promotions'])))
model = self.de[budget]['promotions'].pop()
model = copy.deepcopy(model)
arch = self.search_space.clone()
arch.mutate(model.arch, dataset_api=self.dataset_api)
model.arch = arch
else:
model.arch.sample_random_architecture(dataset_api=self.dataset_api)
model.epoch = min(self.fidelities[round][0], self.max_training_epoch)
model.accuracy = model.arch.query(self.performance_metric,
self.dataset,
epoch=model.epoch,
dataset_api=self.dataset_api)
self._update_history(model)
self.next_round.append(model)
else:
if len(self.current_round) == 0:
# if we are at the end of a round of hyperband, continue training only the best
logger.info("Starting a new round: continuing to train the best arches")
self.counter = 0
cutoff = math.ceil(self.round_sizes[round][self.round_number] * self.top_n_percent)
self.current_round = sorted(self.next_round, key=lambda x: -x.accuracy)[:cutoff]
self.round_number += 1
self.round_number = min(self.round_number, len(self.fidelities[round]) - 1)
self.next_round = []
# train the next architecture
model = self.current_round.pop()
self.counter += 1
"""
Note: technically we would just continue training this arch, but right now,
just for simplicity, we treat it as if we start to train it again from scratch
"""
model = copy.deepcopy(model)
if np.random.rand(1) < self.mutate_prob:
candidate = model.arch.clone()
for _ in range(self.max_mutations):
arch_ = self.search_space.clone()
arch_.mutate(candidate, dataset_api=self.dataset_api)
candidate = arch_
mutant = candidate
arch = self.search_space.clone()
arch.crossover_bin(model.arch, mutant, self.enc_dim, self.crossover_prob, dataset_api=self.dataset_api)
model.arch = arch
model.epoch = min(self.fidelities[round][self.round_number], self.max_training_epoch)
model.accuracy = model.arch.query(self.performance_metric,
self.dataset,
epoch=model.epoch,
dataset_api=self.dataset_api)
self.de[self.fidelities[round][self.round_number]]['promotions'].append(model)
self._update_history(model)
self.next_round.append(model)
def _update_history(self, child):
self.history.append(child)
def get_final_architecture(self):
# Returns the sampled architecture with the lowest validation error.
best_arch = max(self.history, key=lambda x: x.accuracy)
return best_arch.arch, best_arch.epoch
def get_latest_architecture(self):
# Returns the architecture from the most recent epoch
latest_arch = self.history[-1]
return latest_arch.arch, latest_arch.epoch
def train_statistics(self):
best_arch, best_arch_epoch = self.get_final_architecture()
latest_arch, latest_arch_epoch = self.get_latest_architecture()
return (
best_arch.query(Metric.TRAIN_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch-1),
best_arch.query(Metric.VAL_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch),
best_arch.query(Metric.TEST_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch),
latest_arch.query(Metric.TRAIN_TIME, self.dataset, dataset_api=self.dataset_api, epoch=latest_arch_epoch),
)
def test_statistics(self):
best_arch, epoch = self.get_final_architecture()
return best_arch.query(Metric.RAW, self.dataset, dataset_api=self.dataset_api, epoch=epoch)
def get_op_optimizer(self):
raise NotImplementedError()
def get_checkpointables(self):
return {'model': self.history}
def get_model_size(self):
return count_parameters_in_MB(self.history)
|
# -*- coding: utf-8 -*-
import os
import json
import unicodedata
from sqlalchemy import func
from cornice import Service
from cornice.resource import resource
from .models import AdminZoneFinance, DBSession, AdminZone, Stats as StatsModel, ADMIN_LEVEL_CITY
from .maps import timemap_registry, MAPS_CONFIG
city_search = Service(name='city_search', path='/city_search', description="city search")
@city_search.get()
def get_city(request):
term = request.params['term']
term_ascii = unicodedata.normalize('NFKD', unicode(term)).encode('ascii', 'ignore').lower()
results = DBSession.query(*City.az_columns)\
.filter(AdminZone.name % term_ascii)\
.filter(AdminZone.admin_level==ADMIN_LEVEL_CITY)\
.order_by(func.levenshtein(func.lower(AdminZone.name), term_ascii), AdminZone.population.desc())\
.limit(10).all()
return {'results': [City.format_city_res(res) for res in results]}
@resource(collection_path='/cities', path='/city/{id}')
class City(object):
az_columns = (AdminZone.id, AdminZone.name, AdminZone.code_department,
func.ST_X(func.ST_Centroid(AdminZone.geometry)),
func.ST_Y(func.ST_Centroid(AdminZone.geometry)))
@staticmethod
def format_city_res(result):
return {'id': result[0], 'name': result[1], 'code_department': result[2],
'lng': result[3], 'lat': result[4]}
def __init__(self, request):
self.request = request
def get(self):
id = self.request.matchdict['id']
return {'results': self.format_city_res(DBSession.query(*self.az_columns).filter(AdminZone.id==id).first())}
def collection_get(self):
ids = self.request.params['ids'].split(',')
return {'results': [self.format_city_res(res) for res in DBSession.query(*self.az_columns).filter(AdminZone.id.in_(ids)).all()]}
@resource(collection_path='/timemaps', path='/timemap/{id}')
class TimeMap(object):
def __init__(self, request):
self.request = request
def get(self):
id = self.request.matchdict['id']
return {'results': {'var_name': id, 'maps': [m.info for m in timemap_registry[id]]}}
def collection_get(self):
return {'results': [{'var_name': key, 'maps': [m.info for m in timemap_registry[key]]} for key in sorted(MAPS_CONFIG.keys())]}
@resource(collection_path='/finance', path='/finance/{id}')
class AZFinance(object):
def __init__(self, request):
self.request = request
def get(self):
id = self.request.matchdict['id']
results = DBSession.query(AdminZone.name, AdminZone.code_insee, AdminZone.code_department, AdminZoneFinance.year, AdminZoneFinance.data).join(AdminZoneFinance, AdminZone.id==AdminZoneFinance.adminzone_id).filter(AdminZone.id==id).order_by('year').all()
return {'results': [{'name': res[0], 'year': res[3], 'data': res[4]} for res in results]}
@resource(collection_path='/stats', path='/stat/{id}')
class Stats(object):
def __init__(self, request):
self.request = request
def get(self):
id = self.request.matchdict['id']
stat = DBSession.query(StatsModel).filter(StatsModel.name==id).first()
return {'results': {'mean_by_year': json.loads(stat.data['mean_by_year']), 'var_name': id}}
def collection_get(self):
stats = DBSession.query(StatsModel).all()
return {'results': [{'mean_by_year': json.loads(stat.data['mean_by_year']), 'var_name': stat.name} for stat in stats]}
# XXX: view set for development purpose only
from pyramid.response import FileResponse
def index(request):
html_file = os.path.join(request.registry.settings['client_dir'], 'index.html')
return FileResponse(html_file)
|
import argparse
import json
import logging
import os
import random
import time
import uuid
from kafka import KafkaProducer
CARD_NO = [
    "2345796540876432", "7766554433221198", "9856342187654321",
    "7777744433667790", "6538764975321765", "086543226688908"]
TXN_CTRY = [
'SG',
'TH',
'PH',
'MY',
'HK',
'BR',
'US',
'CA',
'IN'
]
POS = [
'9100',
'1234',
'1111'
]
TXN_TYPE = [
'Purchase',
'ATM',
'MOBILE_CHG',
'cardReissue',
'addressChange'
]
MERCH_ID = [
'MERCH1','MERCH2','MERCH3'
]
def generate_event(TXN_TS, CUST, cntr):
    # Note: TXN_TS and CUST are currently unused; the event takes its timestamp
    # from the wall clock and samples a fresh card number below.
    millis = int(round(time.time() * 1000))
ret = {
'org': '1',
'product': 'V',
'cardNumber':CARD_NO[random.randint(0,5)],
'txnTS': millis,
'txnCntry': TXN_CTRY[random.randint(0,8)],
'txnType': TXN_TYPE[random.randint(0,4)],
'pos':POS[random.randint(0,2)],
'mcc': 'MCC',
'merchId': MERCH_ID[random.randint(0,2)],
'destCard':CARD_NO[random.randint(0,5)],
'txnAmt': 1000.0,
'transactionId':'TRAN'+str(cntr),
"dataWeight1": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight2": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight3": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight4": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight5": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight6": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight7": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight8": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight9": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight10": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight11": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight12": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight13": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight14": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight15": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight16": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight17": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight18": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight19": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight20": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight21": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight22": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight23": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight24": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight25": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight26": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight27": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight28": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight29": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight30": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight31": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight32": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight33": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight34": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight35": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight36": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight37": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight38": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight39": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight40": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight41": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight42": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight43": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight44": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight45": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight46": "abcdefghijklmnopqrstuvwxyz0123456789",
"dataWeight47": "abcdefghijklmnopqrstuvwxyz0123456789"
}
return ret
def main(args):
TXN_TS = 1562904000000
TXN_INCREMENT = 360000
logging.info('brokers={}'.format(args.brokers))
logging.info('topic={}'.format(args.topic))
logging.info('rate={}'.format(args.rate))
logging.info('creating kafka producer')
producer = KafkaProducer(bootstrap_servers=args.brokers)
logging.info('begin sending events')
cntr=1
while True:
TXN_TS = TXN_TS+TXN_INCREMENT
crdNo = CARD_NO[random.randint(0,5)]
logging.info('TransactionId {0} and Txn Timestamp {1}'.format(cntr,int(round(time.time() * 1000))))
producer.send(args.topic, json.dumps(generate_event(TXN_TS+TXN_INCREMENT,crdNo,cntr)).encode(), json.dumps(crdNo).encode())
producer.send(args.histTopic, json.dumps(generate_event(TXN_TS+TXN_INCREMENT,crdNo,cntr)).encode(), json.dumps(crdNo).encode())
cntr = int(cntr) + 1
time.sleep(1.0 / 100)
def get_arg(env, default):
    return os.getenv(env) if os.getenv(env, '') != '' else default
def parse_args(parser):
    args = parser.parse_args()
    args.brokers = get_arg('KAFKA_BROKERS', args.brokers)
    args.topic = get_arg('KAFKA_TOPIC', args.topic)
    args.rate = get_arg('RATE', args.rate)
    args.histTopic = get_arg('HIST_TOPIC', args.hist_topic)
    return args
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info('starting kafka-openshift-python emitter')
parser = argparse.ArgumentParser(description='emit some stuff on kafka')
parser.add_argument(
'--brokers',
help='The bootstrap servers, env variable KAFKA_BROKERS',
default='localhost:9092')
parser.add_argument(
'--topic',
help='Topic to publish to, env variable KAFKA_TOPIC',
default='event-input-stream')
    parser.add_argument(
        '--hist-topic',
        help='History topic to publish to, env variable HIST_TOPIC',
        default='hist-input-stream')
parser.add_argument(
'--rate',
type=int,
help='Lines per second, env variable RATE',
default=1)
args = parse_args(parser)
main(args)
logging.info('exiting')
|
# Python Assert
def attendance(days):
assert len(days) !=0, "Days should not be empty"
return 8 * days[0]
print("Total number of hours is ",attendance([20]))
print("Total number of hours is ",attendance([]))
# Output
# Traceback (most recent call last):
# File "py-assert.py", line 8, in <module>
# print("Total number of hours is ",attendance([]))
# File "py-assert.py", line 3, in attendance
# assert len(days) !=0, "Days should not be empty"
# AssertionError: Days should not be empty
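# Note (added): assert statements are removed entirely when Python runs with the
# -O flag, so they should guard against programmer errors rather than validate
# user input.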
|
"""
This Python script executes a series of Python scripts and subroutines that prepare input public use census geography and surname data and constructs the surname-only, georgraphy-only, and BISG proxies for race and ethnicity.
This file is set up to execute the proxy building code sequence on a set of ficitious data constructed by create_test_data.py from the publicly available census surname list and geography data. It is provided to illustrate how the main.py is set up to run the proxy building code.
"""
import os
import surname_creation_lower
import create_attr_over18_all_geo_entities
import surname_parser
import geo_name_merger_all_entities_over_18
def main():
# Identify the input directory that contains the individual or application level data containing name and geocodes.
source_dir = "../input_files"
# Identify the output directory for processing.
out_dir = "../test_output"
# Identify the location of the prepared input census files.
census_data = "../input_files/created_python"
geo_dir = "../input_files/created_python"
# Run the script that prepares the analysis version of the census surname list, including the proportions of individuals by race and ethnicities by surname.
census_surnames_lower = surname_creation_lower.create("../input_files/app_c.csv")
create_attr_over18_all_geo_entities.create(source_dir, census_data)
# Read in the file that defines the program "name_parse" that contains the name standardization routines and merges surname probabilities
# from the census surname list.
# See script for details on arguments that need to be supplied to the program.
surname_probabilities = surname_parser.parse(matchvars=[], app_lname='name1', coapp_lname='name2', output=out_dir, readdir='../test_output', readfile='fictitious_sample_data.pkl', censusdir=census_data)
geo_name_merger_all_entities_over_18.create(output=out_dir, orig_dir=out_dir, orig_file='fictitious_sample_data.pkl', surname_dir=out_dir, surname_file='proxy_name.pkl', orig_surname_match=[], surname_census_match=['zip_sample'], censusdir=census_data, geo_switch=['zip'])
if __name__ == '__main__':
main()
|
from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
question_bank = []
for x in question_data:
next_question = Question(x['question'], x['correct_answer'])
question_bank.append(next_question)
quiz = QuizBrain(question_bank)
while quiz.has_next():
user_input = quiz.next_question()
quiz.print_final_score()
|
# -*- coding: utf-8 -*-
# Program Name: print_cs.py
# Anthony Waldsmith
# 6/13/2016
# Python Version 2.7
# Description: Program to print "CS" ASCII-PUNK style
# Must import print_function because I didn't update to Python 3.x yet.
from __future__ import print_function
# Imports extra utilities that I used to spice up the text print
import time
import sys
# Here I defined a function with a parameter text
def coolprint(text):
# Here it takes each letter (character) from text and sets it to the variable char
for char in text:
# Here we add a short delay
time.sleep(0.01)
        # This is similar to print(char, end=''), but writing to sys.stdout directly
        # lets us emit one character at a time and flush after each one.
sys.stdout.write(char)
# This pushes the text out to the stdout stream (in this case the console is the output)
sys.stdout.flush()
print ("*"*55)
coolprint (" CCCCCC SSSS \n")
coolprint (" C S S \n")
coolprint (" C S \n")
coolprint (" C S \n")
coolprint (" C SSSSS \n")
coolprint (" C S \n")
coolprint (" C S \n")
coolprint (" C S S \n")
coolprint (" CCCCC SSSSS \n")
print ("*"*55)
print ("\n")
coolprint ("Computer Science is Cool Stuff!\n")
'''
*******************************************************
CCCCCC SSSS
C S S
C S
C S
C SSSSS
C S
C S
C S S
CCCCC SSSSS
*******************************************************
Computer Science is Cool Stuff!
'''
|
from colorama import init, Fore, Style
class Colors:
def __init__(self):
init()
self.GREEN = Fore.LIGHTGREEN_EX
self.YELLOW = Fore.LIGHTYELLOW_EX
self.RED = Fore.LIGHTRED_EX
self.ENDC = Style.RESET_ALL
class Action:
SCRAPE = 'scrape'
GENERATE = 'generate'
BRUTE_FORCE = 'bruteforce'
PHONE_NUMBER = 'PHONE_NUMBER'
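# Added usage sketch (illustrative only):
#   colors = Colors()
#   print(colors.GREEN + 'running ' + Action.SCRAPE + colors.ENDC)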
|
#!/usr/bin/env python3
import dataclasses
import pathlib
import typing
import unittest
from unittest.mock import MagicMock, Mock, patch
import numpy
import torch
import bunkai
import bunkai.algorithm.lbd.predict
from bunkai.algorithm.bunkai_sbd.annotator import MorphAnnotatorJanome
from bunkai.base.annotation import Annotations, SpanAnnotation
from bunkai.constant import METACHAR_LINE_BREAK
from bunkai.third.utils_ner import InputFeatures
@dataclasses.dataclass
class ReturnObject(object):
predictions: typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray, typing.Any]]
label_ids: numpy.ndarray
metrics: typing.Dict[str, float]
labels: typing.List[str]
label_map: typing.Dict[int, str]
tokenizer: typing.List[typing.List[str]]
subwords_lengths: typing.List[typing.List[int]]
dataset_content: typing.List[InputFeatures]
subword_tokens: typing.List[str]
model_type: str
def to_prediction_tuple(self):
return self.predictions, self.label_ids, self.metrics
@dataclasses.dataclass
class NewlineTestCase(object):
text: str
return_value: ReturnObject
@dataclasses.dataclass
class DummyBcObject(object):
max_seq_length = 352
class DummyJanomeSubwordsTokenizer(object):
subword_tokenizer_type = "wordpiece"
do_subword_tokenize: bool = True
never_split = None
unk_token = "[UNK]"
sep_token = "[SEP]"
pad_token = "[PAD]"
cls_token = "[CLS]"
mask_token = "[MASK]"
def func_generate_dummy_bert_prediction() -> ReturnObject:
input_ids = numpy.array(
[
[
-100,
0,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
0,
0,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
]
]
)
bert_prediction_dummy = ReturnObject(
predictions=numpy.array(
[
[
[6.2508187, -4.232818, -2.757059],
[6.1650567, -3.9524226, -2.8288684],
[5.8380218, -3.6578689, -2.6578376],
[6.572264, -3.9870892, -2.8709047],
[6.4894476, -3.9268737, -2.9671426],
[6.216006, -3.7496064, -2.699235],
[6.154592, -3.768798, -2.9045649],
[6.713662, -4.110723, -3.033975],
[6.4546986, -4.302385, -2.8338246],
[6.654306, -4.3293185, -2.6386347],
[6.471306, -4.2099767, -2.675298],
[6.5739822, -4.2123647, -2.610855],
[6.56117, -4.2072744, -2.7977717],
[5.5703177, -3.9187171, -2.5862396],
[-1.884768, 4.4782224, -1.8186623],
[5.8886337, -3.8386352, -2.851794],
[6.1382294, -4.0328712, -2.8980045],
[5.56992, -3.6112833, -2.725328],
[6.1413136, -4.0054746, -3.0763247],
[5.949703, -3.7203593, -2.748416],
[6.490921, -4.10034, -2.882184],
[6.5604143, -4.187022, -2.7890666],
[6.594881, -4.082993, -2.8291895],
[6.5816226, -4.3966985, -2.8815534],
[6.4179454, -4.2702456, -2.9540753],
[5.451252, -3.8440175, -2.5752163],
[5.04419, -3.9542036, -2.1646724],
[3.4635344, -1.968563, -2.199682],
[4.8015337, -2.961069, -2.4906867],
[5.642599, -3.646186, -2.6289954],
[5.2767353, -3.545886, -2.642362],
[5.0331793, -3.3589022, -2.6159847],
[5.4294004, -3.643956, -2.6506023],
[4.5621023, -3.094194, -2.4944196],
[4.1612453, -2.6053162, -2.4269052],
[5.314385, -3.575482, -2.6072056],
[5.7357044, -3.812284, -2.5930557],
[5.507862, -3.702612, -2.5583594],
[4.6684365, -3.2131882, -2.445125],
[3.5338802, -2.0267015, -2.207005],
[5.7340727, -3.7640634, -2.584625],
[5.6197987, -3.716104, -2.5262532],
[5.47165, -3.8265584, -2.683898],
[5.622836, -3.8941417, -2.4897096],
[5.7662735, -3.5816152, -2.5766578],
[6.1038303, -3.778536, -2.5087128],
[5.8932824, -3.5206861, -2.5480444],
[5.9496975, -3.6596575, -2.7018757],
[5.80085, -3.8926811, -2.7254941],
[5.2340307, -3.5927713, -2.7278686],
[5.1017323, -3.3140123, -2.598034],
[5.8979063, -3.754191, -2.4886804],
[6.0222745, -3.7422712, -2.5321903],
[6.018483, -3.754891, -2.6665978],
[5.704626, -3.542376, -2.654388],
[4.6625023, -2.9835098, -2.541861],
[5.851006, -3.9127822, -2.657012],
[5.7435713, -3.8231273, -2.593646],
[5.4913816, -3.65688, -2.581519],
[5.5846696, -3.8045363, -2.5105321],
[5.7596455, -3.856001, -2.5696197],
[5.2555804, -3.6392345, -2.5121176],
[3.876574, -2.5438776, -2.222461],
[3.7917795, -2.0205114, -2.2313485],
[5.4242163, -3.5941799, -2.5328736],
[5.375439, -3.5381823, -2.5158625],
[5.4592915, -3.702542, -2.6168754],
[5.560192, -3.833506, -2.513876],
[5.6088414, -3.6553125, -2.5693743],
[5.9053307, -3.8074007, -2.5359588],
[5.9091473, -3.8741407, -2.577005],
[5.8823833, -3.9749475, -2.7565668],
[5.6488376, -3.8580503, -2.6887958],
[5.2936573, -3.6095896, -2.6221497],
[4.391178, -2.9584074, -2.444511],
[3.6910276, -2.122798, -2.2480416],
[4.749332, -3.0014434, -2.4438312],
[5.4725676, -3.572462, -2.5331836],
[5.383877, -3.459734, -2.5013256],
[4.7639284, -3.1252341, -2.3972554],
[3.0694826, -1.5774779, -2.004921],
[4.904317, -3.1489391, -2.5000887],
[5.3118916, -3.5204654, -2.4224253],
[5.180787, -3.576025, -2.5785341],
[5.114481, -3.4627273, -2.5771754],
[5.3889327, -3.5888848, -2.5031228],
[5.6680975, -3.6847317, -2.5290437],
[5.138223, -3.5371237, -2.4942274],
[4.500623, -3.0357556, -2.3804736],
[4.391722, -2.5864615, -2.312683],
[5.499447, -3.6149004, -2.4913578],
[5.5458784, -3.731234, -2.5449672],
[5.3306437, -3.7202172, -2.6340237],
[5.555006, -3.902042, -2.5700445],
[5.5402975, -3.5568285, -2.5195646],
[5.923624, -3.8101199, -2.5412364],
[5.966515, -3.813748, -2.5135388],
[5.8475056, -3.5183058, -2.591],
[6.0184994, -3.9749384, -2.7445848],
[5.477597, -3.7229378, -2.662457],
[5.360407, -3.6949844, -2.6515026],
[5.664912, -3.8135028, -2.5601358],
[5.4726477, -3.3598118, -2.4900548],
[5.716467, -3.51123, -2.4441009],
[5.9715557, -3.444706, -2.381342],
[5.684905, -3.407822, -2.4511132],
[5.652356, -3.5395107, -2.566182],
[6.086296, -4.0143557, -2.757162],
[5.93272, -3.913489, -2.8516464],
[5.513011, -3.4940288, -2.4365187],
[5.847448, -3.7215633, -2.3947792],
[5.8946314, -3.8241282, -2.54148],
[5.750376, -3.7425175, -2.5400023],
[4.2657037, -2.7419717, -2.2960308],
[2.803505, -1.1151873, -2.0724204],
[5.069028, -3.2909331, -2.334376],
[5.1803617, -3.453929, -2.5205479],
[5.4511433, -3.702659, -2.574565],
[5.3047314, -3.4712682, -2.4875286],
[5.6509314, -3.7447715, -2.5951982],
[4.6699295, -3.1814604, -2.428188],
[3.7801378, -2.4215903, -2.232263],
[5.882105, -3.9000041, -2.7211075],
[5.4890738, -3.6392746, -2.5046637],
[5.5276957, -3.803844, -2.6643615],
[5.5274057, -3.8214302, -2.472117],
[5.697824, -3.462097, -2.537269],
[5.957882, -3.4408717, -2.3871222],
[5.7239294, -3.427033, -2.4772236],
[5.7963157, -3.564792, -2.6691947],
[6.039237, -3.963026, -2.7902536],
[5.920341, -4.0087867, -2.9042587],
[5.5651474, -3.5472107, -2.4891918],
[5.8184776, -3.666483, -2.4670477],
[5.9809966, -3.7833667, -2.6582336],
[5.647328, -3.433617, -2.5941267],
[5.5610843, -3.4891434, -2.6713898],
[5.927872, -3.8813362, -2.7214065],
[5.785965, -3.7625728, -2.7681732],
[5.5795455, -3.5510294, -2.4824677],
[5.658676, -3.582807, -2.3650568],
[5.5469546, -3.691554, -2.5050597],
[4.9479814, -3.3915858, -2.4030986],
[2.3329203, -1.131766, -1.8485751],
[4.9787626, -3.2024384, -2.483747],
[4.911786, -3.1755145, -2.3659158],
[5.1218967, -3.4676068, -2.5503993],
[5.3810396, -3.6829956, -2.4702733],
[5.547564, -3.4910274, -2.545243],
[5.7789097, -3.722993, -2.4507918],
[5.970089, -3.8823137, -2.4802265],
[5.9492865, -4.123867, -2.6572356],
[5.6568136, -3.8770761, -2.729714],
[4.3428807, -2.8855278, -2.3944554],
[3.3280978, -1.8396173, -2.1001978],
[4.402819, -2.6880207, -2.3907475],
[5.33469, -3.4014668, -2.4767218],
[5.344347, -3.4569607, -2.5946865],
[5.16568, -3.5411572, -2.5953364],
[4.793007, -3.216085, -2.473689],
[4.2960186, -2.7347312, -2.3489754],
[3.805702, -2.1488824, -2.2282677],
[4.8418455, -3.0454865, -2.429937],
[5.323112, -3.394518, -2.4346614],
[5.0180244, -3.275909, -2.4122648],
[2.9661903, -1.4233906, -1.8672131],
[4.931182, -3.1389503, -2.4102225],
[4.9693556, -3.3392224, -2.3979883],
[4.7797327, -3.1475272, -2.4123082],
[4.9508667, -3.3479035, -2.4246244],
[5.3461943, -3.5120308, -2.403717],
[4.7816477, -3.1765049, -2.3775845],
[3.5539727, -2.0844865, -2.1079192],
[4.6387863, -2.7948396, -2.327755],
[5.2454753, -3.5064278, -2.4758189],
[5.34525, -3.6765518, -2.5685573],
[5.3302755, -3.6907237, -2.3332164],
[5.5514193, -3.3656187, -2.4497824],
[5.9280806, -3.4156218, -2.3316245],
[5.6186748, -3.4095483, -2.409141],
[5.5813913, -3.4803317, -2.5405798],
[6.018386, -4.0123005, -2.7944303],
[5.791168, -3.8674788, -2.84371],
[5.5173597, -3.5319824, -2.486122],
[5.699461, -3.6407382, -2.38972],
[5.5982647, -3.7332854, -2.5377173],
[5.2531133, -3.6009998, -2.52218],
[3.1787467, -1.9656836, -2.093604],
[4.40965, -2.5246685, -2.265173],
[4.986265, -3.2378364, -2.3466463],
[5.221637, -3.5330796, -2.599038],
[5.4330263, -3.6381497, -2.5219502],
[5.4239407, -3.537006, -2.494265],
[5.7460465, -3.6373107, -2.4902682],
[5.8602858, -3.7709167, -2.4536014],
[6.177119, -4.1630287, -2.6641548],
[6.007184, -4.1391425, -2.7746332],
[5.234189, -3.6090078, -2.6703088],
[4.458186, -2.7931972, -2.487601],
[5.560937, -3.7051048, -2.4895868],
[5.6427107, -3.4239001, -2.5180545],
[5.985802, -3.4383836, -2.3521647],
[5.705908, -3.4290183, -2.4250975],
[5.633841, -3.4536667, -2.5365653],
[6.068409, -4.0221148, -2.7998438],
[5.9640284, -3.9706354, -2.9187305],
[5.5351458, -3.5053978, -2.4735668],
[5.819968, -3.7009068, -2.4335914],
[5.7983685, -3.7754993, -2.5713015],
[5.6510377, -3.7154012, -2.5723357],
[3.1107492, -1.7932931, -2.038206],
[4.6108003, -2.809266, -2.3758132],
[5.129378, -3.334814, -2.3553529],
[5.27525, -3.511778, -2.542399],
[5.553991, -3.7560308, -2.5243082],
[5.502201, -3.4396672, -2.5307422],
[5.932541, -3.7612128, -2.6955643],
[5.511362, -3.666332, -2.6930692],
[5.7192364, -3.4919772, -2.5201082],
[5.9494834, -3.9199066, -2.8064528],
[5.989796, -4.028501, -2.9356284],
[5.6844234, -3.7073665, -2.6559854],
[5.7420044, -3.6841903, -2.4219987],
[6.019269, -3.851921, -2.5968761],
[6.012556, -3.7651906, -2.7302198],
[5.22493, -3.475544, -2.7187457],
[3.7751057, -2.2496824, -2.357636],
[5.0919046, -3.3189614, -2.3565404],
[5.065481, -3.4329944, -2.5660634],
[5.425741, -3.6887774, -2.519134],
[5.4049864, -3.6632752, -2.5491867],
[5.7636952, -3.6867075, -2.525423],
[5.069643, -3.6292334, -2.5354824],
[4.145218, -2.7678344, -2.3718739],
[5.6274185, -3.743704, -2.6409373],
[5.338847, -3.5374503, -2.5367274],
[5.486684, -3.7471037, -2.6338878],
[5.5501976, -3.8444057, -2.492587],
[5.680416, -3.5932336, -2.565421],
[5.806082, -3.7560775, -2.4588132],
[6.016405, -3.9916434, -2.5753489],
[5.8762383, -4.095117, -2.6915278],
[5.441005, -3.7025, -2.6978788],
[3.8021955, -2.3089309, -2.2744963],
[3.295628, -1.7485684, -2.1460679],
[3.8095417, -2.1189125, -2.2619543],
[5.4892044, -3.5187688, -2.5719445],
[5.4801655, -3.616661, -2.614462],
[4.9412575, -3.1965845, -2.5532193],
[5.1379337, -3.4174705, -2.5731788],
[4.9215374, -3.290681, -2.5169702],
[4.978571, -3.3317158, -2.5741048],
[5.4809895, -3.7299604, -2.5985618],
[5.4321156, -3.5919714, -2.5068498],
[4.9712186, -3.3998384, -2.4638414],
[3.131559, -1.8154464, -2.0161948],
[4.20892, -2.32469, -2.24137],
[5.3603983, -3.5648031, -2.6062012],
[4.770118, -3.3103986, -2.570477],
[4.701253, -3.1281867, -2.5695343],
[5.692337, -3.640839, -2.4616065],
[5.45103, -3.6451797, -2.5184312],
[5.107309, -3.4940763, -2.478231],
[2.6783373, -1.4480876, -1.8975571],
[5.1807585, -3.3610148, -2.513155],
[5.0343246, -3.2504637, -2.3884437],
[5.273611, -3.5527153, -2.5969102],
[5.4621787, -3.6608934, -2.5238476],
[5.4608717, -3.5109582, -2.4960275],
[5.82248, -3.7629507, -2.517388],
[5.7347484, -3.8859196, -2.5711641],
[6.112766, -4.150079, -2.6801968],
[5.705344, -3.8973122, -2.7257922],
[4.6984296, -3.2733493, -2.5083308],
[4.3192806, -2.8144884, -2.431129],
[3.3805945, -1.8914232, -2.134581],
[5.3352375, -3.2841783, -2.4582448],
[5.7989273, -3.6409295, -2.5358305],
[5.648154, -3.3823075, -2.5451796],
[5.525385, -3.6856186, -2.5623384],
[5.047093, -3.3972507, -2.5808408],
[4.6922903, -3.1120577, -2.520347],
[4.454125, -2.7146144, -2.4370828],
[5.8160934, -3.7015703, -2.4822454],
[5.842537, -3.7719693, -2.5903497],
[5.584236, -3.6224103, -2.578228],
[4.6949444, -3.0575066, -2.4430642],
[3.7849636, -2.0093102, -2.2221537],
[5.34052, -3.4516811, -2.3648925],
[5.4426284, -3.7016723, -2.6079993],
[5.575471, -3.8038383, -2.6187627],
[5.374604, -3.4713042, -2.472371],
[5.573716, -3.7265248, -2.549485],
[4.758337, -3.2580361, -2.4734373],
[3.182425, -1.8679427, -2.1192076],
[5.5819745, -3.679071, -2.5723608],
[5.2793016, -3.4560566, -2.421187],
[5.5433426, -3.7827773, -2.6599064],
[5.5916505, -3.8679109, -2.5255156],
[5.497374, -3.498955, -2.525767],
[5.8535748, -3.5889792, -2.5111132],
[5.98721, -3.4372945, -2.3722787],
[5.8036013, -3.5048394, -2.530361],
[5.953143, -3.8283353, -2.7150617],
[6.1119184, -4.0932183, -2.832982],
[5.959545, -4.0726967, -2.8936415],
[5.6262956, -3.5740123, -2.5001867],
[5.802416, -3.625824, -2.6191945],
[6.1741114, -3.677545, -2.5701354],
[6.019506, -3.5576932, -2.555736],
[5.670724, -3.441112, -2.5689387],
[5.865163, -3.8021712, -2.7791395],
[6.131137, -4.057909, -2.8261504],
[5.7952104, -3.6973677, -2.644189],
[5.7676497, -3.6922674, -2.5299058],
[5.883764, -3.8301828, -2.5025594],
[5.574148, -3.6859841, -2.5446942],
[4.5494995, -3.0782628, -2.4074235],
[2.3880472, -1.2455968, -1.9546468],
[5.3021474, -3.4626734, -2.5357323],
]
],
dtype=numpy.float32,
),
label_ids=input_ids,
metrics={"eval_loss": 3.2364680767059326},
labels=["O", "LB_SEP", "LB_NS"],
label_map={0: "O", 1: "LB_SEP", 2: "LB_NS"},
tokenizer=[
[
"ラウンジ",
"も",
"気軽",
"に",
"利用",
"でき",
"、",
"申し分",
"ない",
"です",
"。",
"▁",
"ホテル",
"内",
"の",
"部屋",
"も",
"ゆったり",
"でき",
"まし",
"た",
"。",
]
],
subwords_lengths=[[1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1]],
subword_tokens=[
"[CLS]",
"ラウンジ",
"も",
"気",
"##軽",
"に",
"利用",
"でき",
"、",
"申し",
"##分",
"ない",
"です",
"。",
"▁",
"ホテル",
"内",
"の",
"部屋",
"も",
"ゆ",
"##ったり",
"でき",
"まし",
"た",
"。",
"[SEP]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
],
dataset_content=[
InputFeatures(
input_ids=[
2,
25018,
28,
704,
29505,
7,
666,
203,
6,
4482,
28593,
80,
2992,
8,
1,
3228,
186,
5,
3250,
28,
1223,
21087,
203,
3913,
10,
8,
3,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
label_ids=list(input_ids.tolist()),
attention_mask=[],
token_type_ids=[],
document_id="0-1",
)
],
model_type="bert",
)
return bert_prediction_dummy
def func_generate_dummy_distil_bert_prediction() -> ReturnObject:
input_ids = numpy.array(
[
[
-100,
0,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
0,
0,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
]
]
)
dummy_object = ReturnObject(
predictions=numpy.array(
[
[
[6.2508187, -4.232818, -2.757059],
[6.1650567, -3.9524226, -2.8288684],
[5.8380218, -3.6578689, -2.6578376],
[6.572264, -3.9870892, -2.8709047],
[6.4894476, -3.9268737, -2.9671426],
[6.216006, -3.7496064, -2.699235],
[6.154592, -3.768798, -2.9045649],
[6.713662, -4.110723, -3.033975],
[6.4546986, -4.302385, -2.8338246],
[6.654306, -4.3293185, -2.6386347],
[6.471306, -4.2099767, -2.675298],
[6.5739822, -4.2123647, -2.610855],
[6.56117, -4.2072744, -2.7977717],
[5.5703177, -3.9187171, -2.5862396],
[-1.884768, 4.4782224, -1.8186623],
[5.8886337, -3.8386352, -2.851794],
[6.1382294, -4.0328712, -2.8980045],
[5.56992, -3.6112833, -2.725328],
[6.1413136, -4.0054746, -3.0763247],
[5.949703, -3.7203593, -2.748416],
[6.490921, -4.10034, -2.882184],
[6.5604143, -4.187022, -2.7890666],
[6.594881, -4.082993, -2.8291895],
[6.5816226, -4.3966985, -2.8815534],
[6.4179454, -4.2702456, -2.9540753],
[5.451252, -3.8440175, -2.5752163],
[5.04419, -3.9542036, -2.1646724],
[3.4635344, -1.968563, -2.199682],
[4.8015337, -2.961069, -2.4906867],
[5.642599, -3.646186, -2.6289954],
[5.2767353, -3.545886, -2.642362],
[5.0331793, -3.3589022, -2.6159847],
[5.4294004, -3.643956, -2.6506023],
[4.5621023, -3.094194, -2.4944196],
[4.1612453, -2.6053162, -2.4269052],
[5.314385, -3.575482, -2.6072056],
[5.7357044, -3.812284, -2.5930557],
[5.507862, -3.702612, -2.5583594],
[4.6684365, -3.2131882, -2.445125],
[3.5338802, -2.0267015, -2.207005],
[5.7340727, -3.7640634, -2.584625],
[5.6197987, -3.716104, -2.5262532],
[5.47165, -3.8265584, -2.683898],
[5.622836, -3.8941417, -2.4897096],
[5.7662735, -3.5816152, -2.5766578],
[6.1038303, -3.778536, -2.5087128],
[5.8932824, -3.5206861, -2.5480444],
[5.9496975, -3.6596575, -2.7018757],
[5.80085, -3.8926811, -2.7254941],
[5.2340307, -3.5927713, -2.7278686],
[5.1017323, -3.3140123, -2.598034],
[5.8979063, -3.754191, -2.4886804],
[6.0222745, -3.7422712, -2.5321903],
[6.018483, -3.754891, -2.6665978],
[5.704626, -3.542376, -2.654388],
[4.6625023, -2.9835098, -2.541861],
[5.851006, -3.9127822, -2.657012],
[5.7435713, -3.8231273, -2.593646],
[5.4913816, -3.65688, -2.581519],
[5.5846696, -3.8045363, -2.5105321],
[5.7596455, -3.856001, -2.5696197],
[5.2555804, -3.6392345, -2.5121176],
[3.876574, -2.5438776, -2.222461],
[3.7917795, -2.0205114, -2.2313485],
[5.4242163, -3.5941799, -2.5328736],
[5.375439, -3.5381823, -2.5158625],
[5.4592915, -3.702542, -2.6168754],
[5.560192, -3.833506, -2.513876],
[5.6088414, -3.6553125, -2.5693743],
[5.9053307, -3.8074007, -2.5359588],
[5.9091473, -3.8741407, -2.577005],
[5.8823833, -3.9749475, -2.7565668],
[5.6488376, -3.8580503, -2.6887958],
[5.2936573, -3.6095896, -2.6221497],
[4.391178, -2.9584074, -2.444511],
[3.6910276, -2.122798, -2.2480416],
[4.749332, -3.0014434, -2.4438312],
[5.4725676, -3.572462, -2.5331836],
[5.383877, -3.459734, -2.5013256],
[4.7639284, -3.1252341, -2.3972554],
[3.0694826, -1.5774779, -2.004921],
[4.904317, -3.1489391, -2.5000887],
[5.3118916, -3.5204654, -2.4224253],
[5.180787, -3.576025, -2.5785341],
[5.114481, -3.4627273, -2.5771754],
[5.3889327, -3.5888848, -2.5031228],
[5.6680975, -3.6847317, -2.5290437],
[5.138223, -3.5371237, -2.4942274],
[4.500623, -3.0357556, -2.3804736],
[4.391722, -2.5864615, -2.312683],
[5.499447, -3.6149004, -2.4913578],
[5.5458784, -3.731234, -2.5449672],
[5.3306437, -3.7202172, -2.6340237],
[5.555006, -3.902042, -2.5700445],
[5.5402975, -3.5568285, -2.5195646],
[5.923624, -3.8101199, -2.5412364],
[5.966515, -3.813748, -2.5135388],
[5.8475056, -3.5183058, -2.591],
[6.0184994, -3.9749384, -2.7445848],
[5.477597, -3.7229378, -2.662457],
[5.360407, -3.6949844, -2.6515026],
[5.664912, -3.8135028, -2.5601358],
[5.4726477, -3.3598118, -2.4900548],
[5.716467, -3.51123, -2.4441009],
[5.9715557, -3.444706, -2.381342],
[5.684905, -3.407822, -2.4511132],
[5.652356, -3.5395107, -2.566182],
[6.086296, -4.0143557, -2.757162],
[5.93272, -3.913489, -2.8516464],
[5.513011, -3.4940288, -2.4365187],
[5.847448, -3.7215633, -2.3947792],
[5.8946314, -3.8241282, -2.54148],
[5.750376, -3.7425175, -2.5400023],
[4.2657037, -2.7419717, -2.2960308],
[2.803505, -1.1151873, -2.0724204],
[5.069028, -3.2909331, -2.334376],
[5.1803617, -3.453929, -2.5205479],
[5.4511433, -3.702659, -2.574565],
[5.3047314, -3.4712682, -2.4875286],
[5.6509314, -3.7447715, -2.5951982],
[4.6699295, -3.1814604, -2.428188],
[3.7801378, -2.4215903, -2.232263],
[5.882105, -3.9000041, -2.7211075],
[5.4890738, -3.6392746, -2.5046637],
[5.5276957, -3.803844, -2.6643615],
[5.5274057, -3.8214302, -2.472117],
[5.697824, -3.462097, -2.537269],
[5.957882, -3.4408717, -2.3871222],
[5.7239294, -3.427033, -2.4772236],
[5.7963157, -3.564792, -2.6691947],
[6.039237, -3.963026, -2.7902536],
[5.920341, -4.0087867, -2.9042587],
[5.5651474, -3.5472107, -2.4891918],
[5.8184776, -3.666483, -2.4670477],
[5.9809966, -3.7833667, -2.6582336],
[5.647328, -3.433617, -2.5941267],
[5.5610843, -3.4891434, -2.6713898],
[5.927872, -3.8813362, -2.7214065],
[5.785965, -3.7625728, -2.7681732],
[5.5795455, -3.5510294, -2.4824677],
[5.658676, -3.582807, -2.3650568],
[5.5469546, -3.691554, -2.5050597],
[4.9479814, -3.3915858, -2.4030986],
[2.3329203, -1.131766, -1.8485751],
[4.9787626, -3.2024384, -2.483747],
[4.911786, -3.1755145, -2.3659158],
[5.1218967, -3.4676068, -2.5503993],
[5.3810396, -3.6829956, -2.4702733],
[5.547564, -3.4910274, -2.545243],
[5.7789097, -3.722993, -2.4507918],
[5.970089, -3.8823137, -2.4802265],
[5.9492865, -4.123867, -2.6572356],
[5.6568136, -3.8770761, -2.729714],
[4.3428807, -2.8855278, -2.3944554],
[3.3280978, -1.8396173, -2.1001978],
[4.402819, -2.6880207, -2.3907475],
[5.33469, -3.4014668, -2.4767218],
[5.344347, -3.4569607, -2.5946865],
[5.16568, -3.5411572, -2.5953364],
[4.793007, -3.216085, -2.473689],
[4.2960186, -2.7347312, -2.3489754],
[3.805702, -2.1488824, -2.2282677],
[4.8418455, -3.0454865, -2.429937],
[5.323112, -3.394518, -2.4346614],
[5.0180244, -3.275909, -2.4122648],
[2.9661903, -1.4233906, -1.8672131],
[4.931182, -3.1389503, -2.4102225],
[4.9693556, -3.3392224, -2.3979883],
[4.7797327, -3.1475272, -2.4123082],
[4.9508667, -3.3479035, -2.4246244],
[5.3461943, -3.5120308, -2.403717],
[4.7816477, -3.1765049, -2.3775845],
[3.5539727, -2.0844865, -2.1079192],
[4.6387863, -2.7948396, -2.327755],
[5.2454753, -3.5064278, -2.4758189],
[5.34525, -3.6765518, -2.5685573],
[5.3302755, -3.6907237, -2.3332164],
[5.5514193, -3.3656187, -2.4497824],
[5.9280806, -3.4156218, -2.3316245],
[5.6186748, -3.4095483, -2.409141],
[5.5813913, -3.4803317, -2.5405798],
[6.018386, -4.0123005, -2.7944303],
[5.791168, -3.8674788, -2.84371],
[5.5173597, -3.5319824, -2.486122],
[5.699461, -3.6407382, -2.38972],
[5.5982647, -3.7332854, -2.5377173],
[5.2531133, -3.6009998, -2.52218],
[3.1787467, -1.9656836, -2.093604],
[4.40965, -2.5246685, -2.265173],
[4.986265, -3.2378364, -2.3466463],
[5.221637, -3.5330796, -2.599038],
[5.4330263, -3.6381497, -2.5219502],
[5.4239407, -3.537006, -2.494265],
[5.7460465, -3.6373107, -2.4902682],
[5.8602858, -3.7709167, -2.4536014],
[6.177119, -4.1630287, -2.6641548],
[6.007184, -4.1391425, -2.7746332],
[5.234189, -3.6090078, -2.6703088],
[4.458186, -2.7931972, -2.487601],
[5.560937, -3.7051048, -2.4895868],
[5.6427107, -3.4239001, -2.5180545],
[5.985802, -3.4383836, -2.3521647],
[5.705908, -3.4290183, -2.4250975],
[5.633841, -3.4536667, -2.5365653],
[6.068409, -4.0221148, -2.7998438],
[5.9640284, -3.9706354, -2.9187305],
[5.5351458, -3.5053978, -2.4735668],
[5.819968, -3.7009068, -2.4335914],
[5.7983685, -3.7754993, -2.5713015],
[5.6510377, -3.7154012, -2.5723357],
[3.1107492, -1.7932931, -2.038206],
[4.6108003, -2.809266, -2.3758132],
[5.129378, -3.334814, -2.3553529],
[5.27525, -3.511778, -2.542399],
[5.553991, -3.7560308, -2.5243082],
[5.502201, -3.4396672, -2.5307422],
[5.932541, -3.7612128, -2.6955643],
[5.511362, -3.666332, -2.6930692],
[5.7192364, -3.4919772, -2.5201082],
[5.9494834, -3.9199066, -2.8064528],
[5.989796, -4.028501, -2.9356284],
[5.6844234, -3.7073665, -2.6559854],
[5.7420044, -3.6841903, -2.4219987],
[6.019269, -3.851921, -2.5968761],
[6.012556, -3.7651906, -2.7302198],
[5.22493, -3.475544, -2.7187457],
[3.7751057, -2.2496824, -2.357636],
[5.0919046, -3.3189614, -2.3565404],
[5.065481, -3.4329944, -2.5660634],
[5.425741, -3.6887774, -2.519134],
[5.4049864, -3.6632752, -2.5491867],
[5.7636952, -3.6867075, -2.525423],
[5.069643, -3.6292334, -2.5354824],
[4.145218, -2.7678344, -2.3718739],
[5.6274185, -3.743704, -2.6409373],
[5.338847, -3.5374503, -2.5367274],
[5.486684, -3.7471037, -2.6338878],
[5.5501976, -3.8444057, -2.492587],
[5.680416, -3.5932336, -2.565421],
[5.806082, -3.7560775, -2.4588132],
[6.016405, -3.9916434, -2.5753489],
[5.8762383, -4.095117, -2.6915278],
[5.441005, -3.7025, -2.6978788],
[3.8021955, -2.3089309, -2.2744963],
[3.295628, -1.7485684, -2.1460679],
[3.8095417, -2.1189125, -2.2619543],
[5.4892044, -3.5187688, -2.5719445],
[5.4801655, -3.616661, -2.614462],
[4.9412575, -3.1965845, -2.5532193],
[5.1379337, -3.4174705, -2.5731788],
[4.9215374, -3.290681, -2.5169702],
[4.978571, -3.3317158, -2.5741048],
[5.4809895, -3.7299604, -2.5985618],
[5.4321156, -3.5919714, -2.5068498],
[4.9712186, -3.3998384, -2.4638414],
[3.131559, -1.8154464, -2.0161948],
[4.20892, -2.32469, -2.24137],
[5.3603983, -3.5648031, -2.6062012],
[4.770118, -3.3103986, -2.570477],
[4.701253, -3.1281867, -2.5695343],
[5.692337, -3.640839, -2.4616065],
[5.45103, -3.6451797, -2.5184312],
[5.107309, -3.4940763, -2.478231],
[2.6783373, -1.4480876, -1.8975571],
[5.1807585, -3.3610148, -2.513155],
[5.0343246, -3.2504637, -2.3884437],
[5.273611, -3.5527153, -2.5969102],
[5.4621787, -3.6608934, -2.5238476],
[5.4608717, -3.5109582, -2.4960275],
[5.82248, -3.7629507, -2.517388],
[5.7347484, -3.8859196, -2.5711641],
[6.112766, -4.150079, -2.6801968],
[5.705344, -3.8973122, -2.7257922],
[4.6984296, -3.2733493, -2.5083308],
[4.3192806, -2.8144884, -2.431129],
[3.3805945, -1.8914232, -2.134581],
[5.3352375, -3.2841783, -2.4582448],
[5.7989273, -3.6409295, -2.5358305],
[5.648154, -3.3823075, -2.5451796],
[5.525385, -3.6856186, -2.5623384],
[5.047093, -3.3972507, -2.5808408],
[4.6922903, -3.1120577, -2.520347],
[4.454125, -2.7146144, -2.4370828],
[5.8160934, -3.7015703, -2.4822454],
[5.842537, -3.7719693, -2.5903497],
[5.584236, -3.6224103, -2.578228],
[4.6949444, -3.0575066, -2.4430642],
[3.7849636, -2.0093102, -2.2221537],
[5.34052, -3.4516811, -2.3648925],
[5.4426284, -3.7016723, -2.6079993],
[5.575471, -3.8038383, -2.6187627],
[5.374604, -3.4713042, -2.472371],
[5.573716, -3.7265248, -2.549485],
[4.758337, -3.2580361, -2.4734373],
[3.182425, -1.8679427, -2.1192076],
[5.5819745, -3.679071, -2.5723608],
[5.2793016, -3.4560566, -2.421187],
[5.5433426, -3.7827773, -2.6599064],
[5.5916505, -3.8679109, -2.5255156],
[5.497374, -3.498955, -2.525767],
[5.8535748, -3.5889792, -2.5111132],
[5.98721, -3.4372945, -2.3722787],
[5.8036013, -3.5048394, -2.530361],
[5.953143, -3.8283353, -2.7150617],
[6.1119184, -4.0932183, -2.832982],
[5.959545, -4.0726967, -2.8936415],
[5.6262956, -3.5740123, -2.5001867],
[5.802416, -3.625824, -2.6191945],
[6.1741114, -3.677545, -2.5701354],
[6.019506, -3.5576932, -2.555736],
[5.670724, -3.441112, -2.5689387],
[5.865163, -3.8021712, -2.7791395],
[6.131137, -4.057909, -2.8261504],
[5.7952104, -3.6973677, -2.644189],
[5.7676497, -3.6922674, -2.5299058],
[5.883764, -3.8301828, -2.5025594],
[5.574148, -3.6859841, -2.5446942],
[4.5494995, -3.0782628, -2.4074235],
[2.3880472, -1.2455968, -1.9546468],
[5.3021474, -3.4626734, -2.5357323],
]
],
dtype=numpy.float32,
),
label_ids=input_ids,
metrics={"eval_loss": 3.2364680767059326},
labels=["O", "LB_SEP", "LB_NS"],
label_map={0: "O", 1: "LB_SEP", 2: "LB_NS"},
tokenizer=[
[
"ラウンジ",
"も",
"気軽",
"に",
"利用",
"でき",
"、",
"申し分",
"ない",
"です",
"。",
"▁",
"ホテル",
"内",
"の",
"部屋",
"も",
"ゆったり",
"でき",
"まし",
"た",
"。",
]
],
subwords_lengths=[
[
1,
1,
2,
1,
1,
1,
1,
1,
2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
1,
1,
1,
]
],
subword_tokens=[
"[CLS]",
"ラウンジ",
"も",
"気",
"##軽",
"に",
"利用",
"でき",
"、",
"申し",
"##分",
"ない",
"です",
"。",
"▁",
"ホテル",
"内",
"の",
"部屋",
"も",
"ゆ",
"##ったり",
"でき",
"まし",
"た",
"。",
"[SEP]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
"[PAD]",
],
model_type="distil_bert",
dataset_content=[
InputFeatures(
input_ids=[
2,
4703,
693,
12,
5,
73,
1172,
28674,
10766,
7,
666,
15,
3913,
10,
8,
32000,
7052,
9,
8494,
832,
14,
16815,
6,
8567,
18178,
8342,
10,
2992,
8,
32000,
3,
]
+ [0] * 289,
token_type_ids=None,
label_ids=list(input_ids.tolist()),
attention_mask=[1] * 31 + [0] * 289,
document_id="0-1",
)
],
)
return dummy_object
class TestPredictor(unittest.TestCase):
def setUp(self) -> None:
self.morph_annotator = MorphAnnotatorJanome()
self.test_dataset_bert = [
NewlineTestCase(
text="ラウンジも気軽に利用でき、申し分ないです。▁ホテル内の部屋もゆったりできました。", return_value=func_generate_dummy_bert_prediction()
)
]
self.test_dataset_distil_bert = [
NewlineTestCase(
text="ラウンジも気軽に利用でき、申し分ないです。▁ホテル内の部屋もゆったりできました。",
return_value=func_generate_dummy_distil_bert_prediction(),
)
]
def init_tokenized_layer(self, text: str) -> Annotations:
annotations = Annotations()
annotations.add_annotation_layer(
"first",
[
SpanAnnotation(
rule_name=None, start_index=0, end_index=len(text), split_string_type=None, split_string_value=None
)
],
)
res = self.morph_annotator.annotate(text, annotations)
return res
@staticmethod
def reformat_data_structure(tokenized_layer: Annotations) -> typing.List[str]:
tokens = []
for s in tokenized_layer.get_annotation_layer("MorphAnnotatorJanome"):
if s.args is None:
continue
if str(s.args["token"]) == "\n" or str(s.args["token"]) == "▁":
tokens.append(METACHAR_LINE_BREAK)
else:
tokens.append(str(s.args["token"]))
return tokens
    @staticmethod
    def check_all_prediction_point(original_sentence: typing.List[str], index_prediction: typing.List[int]):
        """Check that every predicted split point is a line break."""
        for t_index in index_prediction:
            assert (
                METACHAR_LINE_BREAK in original_sentence[t_index]
            ), f"The predicted point {t_index} is not a line break."
def test_run_predict(self):
path_model = ""
predictor_init = MagicMock()
predictor_init.return_value = None
for test_case in self.test_dataset_distil_bert:
transformer_predictor_mock = MagicMock()
transformer_predictor_mock.return_value = test_case.return_value.to_prediction_tuple()
bunkai_predictor_mock_split_long_text = Mock(
return_value=(test_case.return_value.tokenizer, test_case.return_value.subwords_lengths)
)
from typing import List
from bunkai.algorithm.lbd.custom_tokenizers import JanomeSubwordsTokenizer
from bunkai.third.utils_ner import InputExample
            # note: this function must be defined here because it refers to the test_case objects.
def func_dummy_convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_seq_length: int,
tokenizer: JanomeSubwordsTokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
is_distil_bert: bool = False,
):
return test_case.return_value.dataset_content
            # note: this function must be defined here because it refers to the test_case objects.
def convert_ids_to_tokens(ids: typing.Union[int, List[int]], skip_special_tokens: bool = False):
"""dummy method to generate subword-token-sequence from a sequence of token-id"""
return test_case.return_value.subword_tokens
with patch(
"bunkai.algorithm.lbd.train.convert_examples_to_features",
side_effect=func_dummy_convert_examples_to_features,
):
with patch("bunkai.algorithm.lbd.predict.Predictor.__init__", predictor_init):
with patch(
"bunkai.algorithm.lbd.predict.Predictor._split_long_text", bunkai_predictor_mock_split_long_text
):
predictor = bunkai.algorithm.lbd.predict.Predictor(modelpath=pathlib.Path(path_model))
predictor.labels = test_case.return_value.labels
predictor.label_map = test_case.return_value.label_map
predictor.bc = DummyBcObject() # type: ignore
predictor.tokenizer = MagicMock()
predictor.tokenizer.side_effect = DummyJanomeSubwordsTokenizer()
predictor.tokenizer.convert_ids_to_tokens.side_effect = convert_ids_to_tokens
predictor.device = torch.device("cpu")
ret = type(
"Ret",
(object,),
{
"logits": type(
"Ret2",
(object,),
{
"to": lambda x: type(
"Ret4",
(object,),
{
"detach": type(
"Ret3",
(object,),
{
"numpy": lambda x: test_case.return_value.predictions,
},
)
},
)
},
)
},
)
class DummyModelDistilBert:
base_model_prefix = "distilbert"
def __call__(self, input_ids, attention_mask):
return ret
class DummyModelBert:
base_model_prefix = "bert"
def __call__(self, input_ids, attention_mask, token_type_ids):
return ret
if test_case.return_value.model_type == "bert":
predictor.model = DummyModelBert() # type: ignore
elif test_case.return_value.model_type == "distil_bert":
predictor.model = DummyModelDistilBert() # type: ignore
else:
raise Exception("unexpected case.")
tokenized_layer = self.init_tokenized_layer(test_case.text)
tokens = self.reformat_data_structure(tokenized_layer)
# check when return type is TokenIndex
res = list(predictor.predict([tokens]))
self.check_all_prediction_point(tokens, res[0]) # type: ignore
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/lukas/PycharmProjects/zd_inventory/ui/zd_mail.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_dlg_mail(object):
def setupUi(self, dlg_mail):
dlg_mail.setObjectName("dlg_mail")
dlg_mail.resize(756, 618)
self.layoutWidget = QtWidgets.QWidget(dlg_mail)
self.layoutWidget.setGeometry(QtCore.QRect(240, 570, 501, 31))
self.layoutWidget.setObjectName("layoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btn_send = QtWidgets.QPushButton(self.layoutWidget)
self.btn_send.setAutoDefault(False)
self.btn_send.setObjectName("btn_send")
self.horizontalLayout.addWidget(self.btn_send)
self.btn_cancel = QtWidgets.QPushButton(self.layoutWidget)
self.btn_cancel.setAutoDefault(False)
self.btn_cancel.setObjectName("btn_cancel")
self.horizontalLayout.addWidget(self.btn_cancel)
self.layoutWidget1 = QtWidgets.QWidget(dlg_mail)
self.layoutWidget1.setGeometry(QtCore.QRect(0, 10, 741, 551))
self.layoutWidget1.setObjectName("layoutWidget1")
self.lyt_mail = QtWidgets.QHBoxLayout(self.layoutWidget1)
self.lyt_mail.setContentsMargins(10, 10, 10, 10)
self.lyt_mail.setSpacing(20)
self.lyt_mail.setObjectName("lyt_mail")
self.lyt_expired = QtWidgets.QVBoxLayout()
self.lyt_expired.setObjectName("lyt_expired")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.layoutWidget1)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.lbl_sender = QtWidgets.QLabel(self.layoutWidget1)
self.lbl_sender.setObjectName("lbl_sender")
self.horizontalLayout_3.addWidget(self.lbl_sender)
self.led_sender = QtWidgets.QLineEdit(self.layoutWidget1)
self.led_sender.setObjectName("led_sender")
self.horizontalLayout_3.addWidget(self.led_sender)
self.formLayout.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.lbl_passowrd = QtWidgets.QLabel(self.layoutWidget1)
self.lbl_passowrd.setObjectName("lbl_passowrd")
self.horizontalLayout_4.addWidget(self.lbl_passowrd)
self.led_password = QtWidgets.QLineEdit(self.layoutWidget1)
self.led_password.setObjectName("led_password")
self.horizontalLayout_4.addWidget(self.led_password)
self.formLayout.setLayout(1, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_4)
self.chk_default = QtWidgets.QCheckBox(self.layoutWidget1)
self.chk_default.setEnabled(False)
self.chk_default.setLayoutDirection(QtCore.Qt.LeftToRight)
self.chk_default.setAutoFillBackground(False)
self.chk_default.setObjectName("chk_default")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.chk_default)
self.verticalLayout.addLayout(self.formLayout)
self.lyt_expired.addLayout(self.verticalLayout)
self.lbl_expired = QtWidgets.QLabel(self.layoutWidget1)
font = QtGui.QFont()
font.setPointSize(8)
self.lbl_expired.setFont(font)
self.lbl_expired.setObjectName("lbl_expired")
self.lyt_expired.addWidget(self.lbl_expired)
self.lsv_expired = QtWidgets.QListView(self.layoutWidget1)
self.lsv_expired.setEnabled(True)
self.lsv_expired.setAlternatingRowColors(True)
self.lsv_expired.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.lsv_expired.setObjectName("lsv_expired")
self.lyt_expired.addWidget(self.lsv_expired)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.btn_select_all = QtWidgets.QPushButton(self.layoutWidget1)
self.btn_select_all.setAutoDefault(False)
self.btn_select_all.setObjectName("btn_select_all")
self.horizontalLayout_2.addWidget(self.btn_select_all)
self.lyt_expired.addLayout(self.horizontalLayout_2)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.lyt_expired.addLayout(self.verticalLayout_2)
self.lyt_mail.addLayout(self.lyt_expired)
self.lyt_mail_body = QtWidgets.QVBoxLayout()
self.lyt_mail_body.setObjectName("lyt_mail_body")
self.lbl_mail = QtWidgets.QLabel(self.layoutWidget1)
font = QtGui.QFont()
font.setPointSize(8)
self.lbl_mail.setFont(font)
self.lbl_mail.setObjectName("lbl_mail")
self.lyt_mail_body.addWidget(self.lbl_mail)
self.ted_mail = QtWidgets.QPlainTextEdit(self.layoutWidget1)
self.ted_mail.setEnabled(True)
self.ted_mail.setObjectName("ted_mail")
self.lyt_mail_body.addWidget(self.ted_mail)
self.btn_reset_changes = QtWidgets.QPushButton(self.layoutWidget1)
self.btn_reset_changes.setAutoDefault(False)
self.btn_reset_changes.setObjectName("btn_reset_changes")
self.lyt_mail_body.addWidget(self.btn_reset_changes)
self.lyt_mail.addLayout(self.lyt_mail_body)
self.lyt_mail.setStretch(0, 3)
self.lyt_mail.setStretch(1, 7)
self.retranslateUi(dlg_mail)
QtCore.QMetaObject.connectSlotsByName(dlg_mail)
def retranslateUi(self, dlg_mail):
_translate = QtCore.QCoreApplication.translate
dlg_mail.setWindowTitle(_translate("dlg_mail", "Dialog"))
self.btn_send.setText(_translate("dlg_mail", "Wyślij"))
self.btn_cancel.setText(_translate("dlg_mail", "Anuluj"))
self.label.setText(_translate("dlg_mail", "NADAWCA"))
self.lbl_sender.setText(_translate("dlg_mail", "email"))
self.lbl_passowrd.setText(_translate("dlg_mail", "hasło"))
self.chk_default.setText(_translate("dlg_mail", "Ustaw jako domyślne"))
self.lbl_expired.setText(_translate("dlg_mail", "ODBIORCA"))
self.btn_select_all.setText(_translate("dlg_mail", "Zaznacz wszystkich"))
self.lbl_mail.setText(_translate("dlg_mail", "TREŚĆ WIADOMOŚCI"))
self.btn_reset_changes.setText(_translate("dlg_mail", "Resetuj zmiany"))
|
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.parser.upload_statement import UploadStatement
from src.parser.evaql.evaql_parserVisitor import evaql_parserVisitor
from src.parser.evaql.evaql_parser import evaql_parser
class Upload(evaql_parserVisitor):
def visitUploadStatement(self, ctx: evaql_parser.UploadStatementContext):
srv_path = self.visit(ctx.fileName()).value
video_blob = self.visit(ctx.videoBlob()).value
stmt = UploadStatement(srv_path, video_blob)
return stmt
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier
from flask import Flask, request, redirect, url_for, flash, jsonify
import json
import pickle
import math
from scrape import c
vectorizer = pickle.load(open("tfidf_vectorizer.pickle",'rb'))
model = pickle.load(open("PA.pickle",'rb'))
def wordopt(text):
    """Normalize raw article text before TF-IDF vectorization."""
    text = text.lower()
    text = re.sub(r'\[.*?\]', '', text)  # drop bracketed fragments
    text = re.sub(r'\W', ' ', text)  # replace non-word characters with spaces
    text = re.sub(r'https?://\S+|www\.\S+', '', text)  # strip URLs
    text = re.sub(r'<.*?>+', '', text)  # strip HTML tags
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)  # strip punctuation
    text = re.sub(r'\n', '', text)  # remove newlines
    text = re.sub(r'\w*\d\w*', '', text)  # drop tokens containing digits
    return text
link = input("")  # URL of the article to check
test = c(link)  # fetch the page content via scrape.c
test_x = wordopt(test)  # clean the text before vectorizing
tfidf_x = vectorizer.transform([test_x])
pred = model.predict(tfidf_x)  # hard class prediction (not printed below)
result = math.ceil(model._predict_proba_lr(tfidf_x)[0][1] * 100)
print(result, "% True.")
|
import copy
from dataclasses import replace
from typing import Callable, Sequence, Tuple
import numpy as np
import pytest
from .._evaluation import Evaluation, Quantity
from ..evaluators import SegmentationEvaluator
def get_random_prediction_and_mask(
image_size: Tuple[int, int, int], num_classes: int
) -> Tuple[np.ndarray, np.ndarray]:
return (
np.random.randint(0, num_classes, image_size),
np.random.randint(0, num_classes, image_size),
)
@pytest.mark.parametrize("classes", (["dog", "cat", "snake"], ["dog", "cat"]))
def test_SegmentationEvaluator(classes: Sequence[str]) -> None:
np.random.seed(42)
num_classes = len(classes)
prediction, mask = get_random_prediction_and_mask((2, 256, 256), num_classes)
se = SegmentationEvaluator(num_classes, class_names=classes)
evaluation = se.evaluate(
ground_truth=mask, model_prediction=prediction, model_name="MockModel"
)
assert isinstance(evaluation, Evaluation)
assert evaluation.model_name == "MockModel"
@pytest.mark.parametrize("classes", (["dog", "cat", "snake"], ["dog", "cat"]))
def test_SegmentationEvaluator_perfect_prediction(classes: Sequence[str]) -> None:
np.random.seed(42)
num_classes = len(classes)
predictions, _ = get_random_prediction_and_mask((2, 256, 256), num_classes)
mask = copy.deepcopy(predictions)
se = SegmentationEvaluator(num_classes, class_names=classes)
evaluation = se.evaluate(
ground_truth=mask, model_prediction=predictions, model_name="MockModel"
)
evaluation.figures()
expected_quantities = []
for class_name in classes:
expected_quantities.append(
Quantity(name=f"{class_name} mIoU", value=1.0, higher_is_better=True)
)
expected_quantities.append(
Quantity(name="Class weighted Mean mIoU", value=1.0, higher_is_better=True)
)
assert len(evaluation.quantities) == len(expected_quantities)
for actual, expected in zip(evaluation.quantities, expected_quantities):
# check that everything except value is equal
assert replace(actual, value=42) == replace(expected, value=42)
# check that values are approximately equal
if isinstance(expected.value, str):
            assert isinstance(actual.value, str)
assert actual.value == expected.value
else:
assert isinstance(expected.value, float)
assert isinstance(actual.value, float)
np.testing.assert_allclose(actual.value, expected.value)
@pytest.mark.parametrize(
"num_classes, class_names", [(1, ["dog", "cat"]), (2, ["dog"])]
)
def test_SegmentationEvaluator_inconsistent_class_names(
num_classes: int, class_names: Sequence[str]
) -> None:
"""
Tests if the __init__ method of SegmentationEvaluator raises an error if the
length of the class_names list is not equal to num_classes
"""
with pytest.raises(ValueError):
_ = SegmentationEvaluator(num_classes, class_names=class_names)
@pytest.mark.parametrize("num_classes, class_weights", [(1, [0.2, 0.3]), (2, [0.2])])
def test_SegmentationEvaluator_inconsistent_class_weights(
num_classes: int, class_weights: Sequence[float]
) -> None:
"""
Tests if the __init__ method of SegmentationEvaluator raises an error if the
length of the class_weights list is not equal to num_classes
"""
with pytest.raises(ValueError):
_ = SegmentationEvaluator(num_classes, class_weights=class_weights)
@pytest.mark.parametrize(
"num_classes, ground_truth, model_prediction",
[
(3, *get_random_prediction_and_mask((2, 256, 256), 2)),
(2, *get_random_prediction_and_mask((2, 256, 256), 3)),
],
)
def test_SegmentationEvaluator_inconsistent_num_classes(
num_classes: int, ground_truth: np.ndarray, model_prediction: np.ndarray
) -> None:
"""
Tests if the evaluate method of SegmentationEvaluator raises an error if the
actual number of classes present in the ground_truth/prediction is not equal to
num_classes.
"""
se = SegmentationEvaluator(num_classes)
with pytest.raises(ValueError):
se.evaluate(ground_truth, model_prediction, model_name="MockModel")
@pytest.mark.parametrize(
"num_classes, ground_truth, model_prediction",
[
(
3,
np.random.randint(0, 3, (1, 256, 256)),
np.random.randint(0, 3, (2, 256, 256)),
)
],
)
def test_SegmentationEvaluator_inconsistent_shapes(
num_classes: int, ground_truth: np.ndarray, model_prediction: np.ndarray
) -> None:
"""
Tests if the evaluate method of SegmentationEvaluator raises an error if the
shapes of the ground_truth and model_prediction aren't the same
"""
se = SegmentationEvaluator(num_classes)
with pytest.raises(ValueError):
se.evaluate(ground_truth, model_prediction, model_name="MockModel")
@pytest.mark.parametrize(
"num_classes, ground_truth, model_prediction",
[
(
3,
np.random.randint(0, 3, (256, 256)),
np.random.randint(0, 3, (2, 256, 256)),
),
(
3,
np.random.randint(0, 3, (2, 256, 256)),
np.random.randint(0, 3, (256, 256)),
),
],
)
def test_SegmentationEvaluator_not_a_3D_array(
num_classes: int, ground_truth: np.ndarray, model_prediction: np.ndarray
) -> None:
"""
Tests if the evaluate method of SegmentationEvaluator raises an error if the
ground_truth or model_prediction isn't a 3D array
"""
se = SegmentationEvaluator(num_classes)
with pytest.raises(ValueError):
se.evaluate(ground_truth, model_prediction, model_name="MockModel")
@pytest.mark.parametrize("num_classes", [2, 3])
@pytest.mark.parametrize(
"quantity_filter",
[
lambda name: False,
lambda name: True,
lambda name: "Weighted" not in name,
lambda name: "mIoU" not in name,
],
)
def test_SegmentationEvaluator_filter_quantities(
num_classes: int, quantity_filter: Callable[[str], bool]
) -> None:
np.random.seed(42)
predictions, mask = get_random_prediction_and_mask((2, 256, 256), num_classes)
se_all = SegmentationEvaluator(num_classes)
se_filtering = SegmentationEvaluator(num_classes, filter_quantities=quantity_filter)
evaluation_all = se_all.evaluate(
ground_truth=mask, model_prediction=predictions, model_name="MockModel"
)
evaluation_filtered = se_filtering.evaluate(
ground_truth=mask, model_prediction=predictions, model_name="MockModel"
)
assert replace(evaluation_all, quantities=[], lazy_figures=[]) == replace(
evaluation_filtered, quantities=[], lazy_figures=[]
)
for quantity in evaluation_all.quantities:
if quantity_filter(quantity.name):
same_quantity = evaluation_filtered.get_by_name(quantity.name)
assert same_quantity == quantity
else:
with pytest.raises(ValueError):
evaluation_filtered.get_by_name(quantity.name)
for filtered_quantity in evaluation_filtered.quantities:
same_quantity = evaluation_all.get_by_name(filtered_quantity.name)
assert same_quantity == filtered_quantity
@pytest.mark.parametrize(
"num_classes, desired_number_of_figures, figure_filter",
[
(3, 0, lambda name: False),
(3, 4, lambda name: True),
(3, 1, lambda name: "Heatmap" not in name),
(3, 3, lambda name: "Class" not in name),
(2, 2, lambda name: "Class" not in name),
(2, 3, lambda name: True),
],
)
def test_SegmentationEvaluator_filter_figures(
num_classes: int,
desired_number_of_figures: int,
figure_filter: Callable[[str], bool],
) -> None:
np.random.seed(42)
predictions, mask = get_random_prediction_and_mask((2, 256, 256), num_classes)
se_all = SegmentationEvaluator(num_classes)
se_filtering = SegmentationEvaluator(num_classes, filter_figures=figure_filter)
evaluation_all = se_all.evaluate(
ground_truth=mask, model_prediction=predictions, model_name="MockModel"
)
evaluation_filtered = se_filtering.evaluate(
ground_truth=mask, model_prediction=predictions, model_name="MockModel"
)
assert replace(evaluation_all, lazy_figures=[]) == replace(
evaluation_filtered, lazy_figures=[]
)
assert len(evaluation_filtered.lazy_figures) == desired_number_of_figures
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
scripts.evalsegmentation.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
... a script to evaluate a segmentation vs a reference segmentation.
It estimates the Unit Boundary Positioning Accuracy and generates an
R script to draw a boxplot of the evaluation.
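
    Example (file names below are illustrative):

        python evalsegmentation.py -fr ref.TextGrid -fh hyp.TextGrid -d 0.04 -o results/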
"""
import sys
import os
import codecs
import os.path
from argparse import ArgumentParser
import subprocess
PROGRAM = os.path.abspath(__file__)
SPPAS = os.path.dirname(os.path.dirname(os.path.dirname(PROGRAM)))
sys.path.append(SPPAS)
from sppas import ubpa
from sppas import sppasRW
from sppas import sppasTranscription
import sppas.src.anndata.aio
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
vowels = ["a","e","i","i:","o","u","y","A","E","I","M","O","Q","U","V","Y",
"a~","A~","E~", "e~","i~","o~","O~","O:","u~","U~","eu","EU","{",
"}","@","1","2","3","6","7","8","9","&","3:r","OI","@U","eI","ai",
"aI","au","aU","aj","aw","ei","ew","ia","ie","io","ja","je","jo",
"ju","oj","ou","ua","uo","wa","we","wi","wo","ya","ye","yu",
"A/","O/","U~/"]
consonants = ["b","b_<","c","d","d`","f","g","g_<","h","j","k","l","l`","m",
"n","n`","p","q","r","r`","r\\", "rr","s","s`","t","t`","v",
"w","x","z","z`","B","C","D","F","G","H","J","K","L","M","N",
"R","S","T","W","X","Z","4","5","?","ts","tS","dz","dZ","tK",
"kp","Nm","rr","ss","ts_h","k_h","p_h","t_h","ts_hs","tss"]
fillers = ["laugh", "noise", "fp"]
# ----------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Functions to manage input annotated files
def get_tier(filename, tier_idx):
"""Return the tier of the given index in an annotated file.
:param filename: (str) Name of the annotated file
:param tier_idx: (int) Index of the tier to get
:returns: sppasTier or None
"""
try:
parser = sppasRW(filename)
trs_input = parser.read(filename)
except:
return None
if tier_idx < 0 or tier_idx >= len(trs_input):
return None
return trs_input[tier_idx]
# ----------------------------------------------------------------------------
def get_tiers(ref_filename, hyp_filename, ref_idx=0, hyp_idx=0):
"""Return a reference and an hypothesis tier from annotated files.
:param ref_filename: Name of the annotated file with the reference
:param hyp_filename: Name of the annotated file with the hypothesis
:param ref_idx: (int)
:param hyp_idx: (int)
:returns: a tuple with sppasTier or None for both ref and hyp
"""
ref_tier = get_tier(ref_filename, ref_idx)
hyp_tier = get_tier(hyp_filename, hyp_idx)
return ref_tier, hyp_tier
# ---------------------------------------------------------------------------
# Function to draw the evaluation as BoxPlots (using an R script)
def test_R():
"""Test if Rscript is available as a command of the system. """
try:
NULL = open(os.devnull, "w")
subprocess.call(['Rscript'], stdout=NULL, stderr=subprocess.STDOUT)
except OSError:
return False
return True
# ---------------------------------------------------------------------------
def exec_Rscript(filenamed, filenames, filenamee, rscriptname, pdffilename):
"""Perform an the R script to draw boxplots from the given files.
Write the script, then execute it, and delete it.
:param filenamed: (str) duration
:param filenames: (str) start
:param filenamee: (str) end
:param rscriptname: (str)
:param pdffilename: PDF file with the result.
"""
with codecs.open(rscriptname, "w", "utf8") as fp:
fp.write("#!/usr/bin/env Rscript \n")
fp.write("# Title: Boxplot for phoneme alignments evaluation \n")
fp.write("\n")
fp.write("args <- commandArgs(trailingOnly = TRUE) \n")
fp.write("\n")
fp.write("# Get datasets \n")
fp.write('dataD <- read.csv("%s",header=TRUE,sep=",") \n' % filenamed)
fp.write('dataPS <- read.csv("%s",header=TRUE,sep=",") \n' % filenames)
fp.write('dataPE <- read.csv("%s",header=TRUE,sep=",") \n' % filenamee)
fp.write("\n")
fp.write("# Define Output file \n")
fp.write('pdf(file="%s", paper="a4") \n'%pdffilename)
fp.write("\n")
fp.write("# Control plotting style \n")
fp.write("par(mfrow=c(3,1)) # only one line and one column \n")
fp.write("par(cex.lab=1.2) # controls the font size of the axis title \n")
fp.write("par(cex.axis=1.2) # controls the font size of the axis labels \n")
fp.write("par(cex.main=1.6) # controls the font size of the title \n")
fp.write("\n")
fp.write("# Then, plot: \n")
fp.write("boxplot(dataD$DeltaD~dataD$PhoneD, \n")
fp.write(' main="Delta Duration", # graphic title \n')
fp.write(' ylab="T(automatic) - T(manual)", # y axis title \n')
fp.write(' #range=0, # use min and max for the whisker \n')
fp.write(' outline = FALSE, # REMOVE OUTLIERS \n')
fp.write(' border="blue", \n')
fp.write(' ylim=c(-0.05,0.05), \n')
fp.write(' col="pink") \n')
fp.write(" abline(0,0) \n")
fp.write("\n")
fp.write('boxplot(dataPS$DeltaS~dataPS$PhoneS, \n')
fp.write(' main="Delta Start Position", \n')
fp.write(' ylab="T(automatic) - T(manual)", \n')
fp.write(' outline = FALSE, \n')
fp.write(' border = "blue", \n')
fp.write(' ylim=c(-0.05,0.05), \n')
fp.write(' col = "pink") \n')
fp.write(' abline(0,0) \n')
fp.write("\n")
fp.write('boxplot(dataPE$DeltaE~dataPE$PhoneE, \n')
fp.write(' main="Delta End Position", \n')
fp.write(' ylab="T(automatic) - T(manual)", \n')
fp.write(' outline = FALSE, \n')
fp.write(' border="blue", \n')
fp.write(' ylim=c(-0.05,0.05), \n')
fp.write(' col="pink") \n')
fp.write('abline(0,0) \n')
fp.write('graphics.off() \n')
fp.write("\n")
fp.close()
command = "Rscript " + rscriptname
try:
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
retval = p.wait()
line = p.communicate()
except OSError as e:
os.remove(rscriptname)
return e
os.remove(rscriptname)
if retval != 0:
return line
return ""
# ---------------------------------------------------------------------------
def boxplot(deltaposB, deltaposE, deltaposD, extras, out_name, vector, name):
"""Create a PDF file with boxplots of selected phonemes.
:param vector: the list of phonemes
"""
filenamed = out_name+"-delta-duration-"+name+".csv"
filenames = out_name+"-delta-position-start-"+name+".csv"
filenamee = out_name+"-delta-position-end-"+name+".csv"
fpb = codecs.open(filenames, "w", 'utf8')
fpe = codecs.open(filenamee, "w", 'utf8')
fpd = codecs.open(filenamed, "w", 'utf8')
fpb.write("PhoneS,DeltaS\n")
fpe.write("PhoneE,DeltaE\n")
fpd.write("PhoneD,DeltaD\n")
for i, extra in enumerate(extras):
etiquette = extra[0]
tag = extra[2]
if etiquette in vector:
if tag != 0:
fpb.write("%s,%f\n" % (etiquette, deltaposB[i]))
if tag != -1:
fpe.write("%s,%f\n" % (etiquette, deltaposE[i]))
fpd.write("%s,%f\n" % (etiquette, delta_durationur[i]))
fpb.close()
fpe.close()
fpd.close()
message = exec_Rscript(filenamed, filenames, filenamee,
out_name+".R", out_name+"-delta-"+name+".pdf")
os.remove(filenamed)
os.remove(filenames)
os.remove(filenamee)
return message
# ---------------------------------------------------------------------------
# Main program
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Verify and extract args:
parser = ArgumentParser(
usage="%(prog)s -fr ref -fh hyp [options]",
description="Compare two segmentation boundaries, "
"in the scope of evaluating an hypothesis vs a reference.")
parser.add_argument(
"-fr",
metavar="file",
required=True,
help='Input annotated file/directory name of the reference.')
parser.add_argument(
"-fh",
metavar="file",
required=True,
help='Input annotated file/directory name of the hypothesis.')
parser.add_argument(
"-tr",
metavar="file",
type=int,
default=1,
required=False,
help='Tier number of the reference (default=1).')
parser.add_argument(
"-th",
metavar="file",
type=int,
default=1,
required=False,
help='Tier number of the hypothesis (default=1).')
parser.add_argument(
"-d",
metavar="delta",
required=False,
type=float,
default=0.04,
    help='Delta max value for the UBPA estimation (default=0.04).')
parser.add_argument(
"-o",
metavar="path",
required=False,
help='Path for the output files.')
parser.add_argument(
"--quiet",
action='store_true',
help="Disable the verbosity.")
if len(sys.argv) <= 1:
sys.argv.append('-h')
args = parser.parse_args()
# ----------------------------------------------------------------------------
# Global variables
idxref_tier = args.tr - 1
idxhyp_tier = args.th - 1
files = [] # List of tuples: (ref_filename, hyp_filename)
delta_durationur = [] # Duration of each phoneme
deltaposB = [] # Position of the beginning boundary of each phoneme
deltaposE = [] # Position of the end boundary of each phoneme
deltaposM = [] # Position of the center of each phoneme
extras = []  # List of tuples: (evaluated phoneme, hypothesis file name, tag)
# ----------------------------------------------------------------------------
# Prepare file names to be analyzed, as a list of tuples (ref,hyp)
out_path = None
if args.o:
out_path = args.o
if not os.path.exists(out_path):
os.mkdir(out_path)
if os.path.isfile(args.fh) and os.path.isfile(args.fr):
hyp_filename, extension = os.path.splitext(args.fh)
out_basename = os.path.basename(hyp_filename)
if out_path is None:
out_path = os.path.dirname(hyp_filename)
out_name = os.path.join(out_path, out_basename)
files.append((os.path.basename(args.fr), os.path.basename(args.fh)))
ref_directory = os.path.dirname(args.fr)
hyp_directory = os.path.dirname(args.fh)
elif os.path.isdir(args.fh) and os.path.isdir(args.fr):
if out_path is None:
out_path = args.fh
out_name = os.path.join(out_path, "phones")
ref_directory = args.fr
hyp_directory = args.fh
ref_files = []
hyp_files = []
for fr in os.listdir(args.fr):
if os.path.isfile(os.path.join(ref_directory, fr)):
ref_files.append(fr)
for fh in os.listdir(args.fh):
if os.path.isfile(os.path.join(hyp_directory, fh)):
hyp_files.append(os.path.basename(fh))
for fr in ref_files:
base_fr, ext_fr = os.path.splitext(fr)
if not ext_fr.lower() in sppas.src.anndata.aio.extensions:
continue
for fh in hyp_files:
base_fh, ext_fh = os.path.splitext(fh)
if not ext_fh.lower() in sppas.src.anndata.aio.extensions:
continue
if fh.startswith(base_fr):
files.append((fr, fh))
else:
print("Both reference and hypothesis must be of the same type: "
"file or directory.")
sys.exit(1)
if not args.quiet:
print("Results will be stored in: {}".format(out_name))
if len(files) == 0:
print("No matching hyp/ref files. Nothing to do!")
sys.exit(1)
# ----------------------------------------------------------------------------
# Evaluate the delta from the hypothesis to the reference
# Delta = T(hyp) - T(ref)
if not args.quiet:
print("Results are evaluated on {:d} files: ".format(len(files)))
for f in files:
if not args.quiet:
print(" {:s}".format(os.path.basename(f[1])))
fr = os.path.join(ref_directory, f[0])
fh = os.path.join(hyp_directory, f[1])
ref_tier, hyp_tier = get_tiers(fr, fh, idxref_tier, idxhyp_tier)
if ref_tier is None or hyp_tier is None:
print("[ INFO ] No aligned phonemes found in tiers. Nothing to do. ")
continue
if len(ref_tier) != len(hyp_tier):
print("[ ERROR ] Hypothesis: {} -> {} vs Reference: {} -> {} phonemes."
.format(f[1], len(hyp_tier), f[0], len(ref_tier)))
continue
if not args.quiet:
print("[ OK ] Hypothesis: {} vs Reference: {} -> {} phonemes."
.format(f[1], f[0], len(ref_tier)))
# ----------------------------------------------------------------------------
# Compare boundaries and durations of annotations.
i = 0
imax = len(ref_tier)-1
for ref_ann, hyp_ann in zip(ref_tier, hyp_tier):
etiquette = ref_ann.serialize_labels()
if etiquette == "#":
continue
# begin
rb = ref_ann.get_location().get_best().get_begin().get_midpoint()
hb = hyp_ann.get_location().get_best().get_begin().get_midpoint()
delta_start = hb-rb
# end
re = ref_ann.get_location().get_best().get_end().get_midpoint()
he = hyp_ann.get_location().get_best().get_end().get_midpoint()
delta_end = he-re
# middle
rm = rb + (re-rb)/2.
hm = hb + (he-hb)/2.
delta_center = hm-rm
# duration
rd = ref_ann.get_location().get_best().duration().get_value()
hd = hyp_ann.get_location().get_best().duration().get_value()
delta_duration = hd-rd
tag = 1
if i == 0:
tag = 0
elif i == imax:
tag = -1
# Add new values into vectors, to evaluate the accuracy
deltaposB.append(delta_start)
deltaposE.append(delta_end)
deltaposM.append(delta_center)
delta_durationur.append(delta_duration)
extras.append((etiquette, fh, tag))
i += 1
# ----------------------------------------------------------------------------
# Save delta values into output files
fpb = codecs.open(os.path.join(out_name)+"-delta-position-start.txt", "w", 'utf8')
fpe = codecs.open(os.path.join(out_name)+"-delta-position-end.txt", "w", 'utf8')
fpm = codecs.open(os.path.join(out_name)+"-delta-position-middle.txt", "w", 'utf8')
fpd = codecs.open(os.path.join(out_name)+"-delta-duration.txt", "w", 'utf8')
fpb.write("Phone Delta Filename\n")
fpe.write("Phone Delta Filename\n")
fpm.write("Phone Delta Filename\n")
fpd.write("Phone Delta Filename\n")
for i, extra in enumerate(extras):
etiquette = extra[0]
filename = extra[1]
tag = extra[2]
if tag != 0:
fpb.write("%s %f %s\n" % (etiquette, deltaposB[i], filename))
if tag != -1:
fpe.write("%s %f %s\n" % (etiquette, deltaposE[i], filename))
fpm.write("%s %f %s\n" % (etiquette, deltaposM[i], filename))
fpd.write("%s %f %s\n" % (etiquette, delta_durationur[i], filename))
fpb.close()
fpe.close()
fpm.close()
fpd.close()
# ----------------------------------------------------------------------------
# Estimates the Unit Boundary Positioning Accuracy
if not args.quiet:
ubpa(deltaposB, "Start boundary", sys.stdout, delta_max=args.d, step=0.005)
with open(out_name+"-eval-position-start.txt", "w") as fp:
ubpa(deltaposB, "Start boundary position", fp, delta_max=args.d, step=0.005)
with open(out_name+"-eval-position-end.txt", "w") as fp:
ubpa(deltaposE, "End boundary position", fp, delta_max=args.d, step=0.005)
with open(out_name+"-eval-position-middle.txt", "w") as fp:
ubpa(deltaposM, "Middle boundary position", fp, delta_max=args.d, step=0.005)
with open(out_name+"-eval-duration.txt", "w") as fp:
ubpa(delta_durationur, "Duration", fp, delta_max=args.d, step=0.005)
# ----------------------------------------------------------------------------
# Draw BoxPlots of the accuracy via an R script
if test_R() is False:
sys.exit(0)
message = boxplot(deltaposB, deltaposE, delta_durationur, extras, out_name, vowels, "vowels")
if len(message) > 0 and not args.quiet:
print("{:s}".format(message))
message = boxplot(deltaposB, deltaposE, delta_durationur, extras, out_name, consonants, "consonants")
if len(message) > 0 and not args.quiet:
print("{:s}".format(message))
message = boxplot(deltaposB, deltaposE, delta_durationur, extras, out_name, fillers, "fillers")
if len(message) > 0 and not args.quiet:
print("{:s}".format(message))
others = []
known = vowels+consonants+fillers
for extra in extras:
etiquette = extra[0]
if not (etiquette in known or etiquette in others):
others.append(etiquette)
if len(others) > 0:
message = boxplot(deltaposB, deltaposE, delta_durationur, extras, out_name, others, "others")
if len(message) > 0 and not args.quiet:
print("{:s}".format(message))
|
import numpy as np
class DataClusterAnalysis:
def __init__(self):
pass
    def _getNumberOfPointsInCluster(self, data, clusterWidth):
        '''
        Take the first element in the data set and count the following points that
        fall within the cluster. Returns that count.
        '''
        count = 1
        # A point joins the cluster while it is closer than clusterWidth[0] to its
        # predecessor and closer than clusterWidth[1] to the first point of the cluster.
        while count < len(data) and data[count] - data[count - 1] < clusterWidth[0]\
                and data[count] - data[0] < clusterWidth[1]:
            count += 1
        return count
def _clusterByData(self, data1,data2,clusterWidth):
# data1Cluster = []
# data2Cluster = []
dataClusters = [[],[]]
while (len(data1) > 0):
numberOfPoints = self._getNumberOfPointsInCluster(data1, clusterWidth)
dataClusters[0].append(data1[:numberOfPoints])
dataClusters[1].append(data2[:numberOfPoints])
# data1Cluster.append(np.mean(data1[:numberOfPoints]))
# data2Cluster.append(np.mean(data2[:numberOfPoints]))
data1 = np.delete(data1, np.arange(numberOfPoints))
data2 = np.delete(data2, np.arange(numberOfPoints))
return dataClusters
    def _clusterDataStat(self, dataClusters, operator):
        '''
        Apply the given operator (e.g. np.mean) to each cluster of both data
        series and return the per-cluster statistics as two arrays.
        '''
dataSize = len(dataClusters[0])
clusterStat = [np.zeros(dataSize), np.zeros(dataSize)]
for i in range(dataSize):
clusterStat[0][i] = operator(dataClusters[0][i])
clusterStat[1][i] = operator(dataClusters[1][i])
return clusterStat
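
# Minimal usage sketch (illustrative data and cluster widths; not part of the
# original module): group a time series into clusters separated by gaps, then
# reduce each cluster with a statistic such as np.mean.
if __name__ == "__main__":
    times = np.array([0.0, 0.1, 0.2, 5.0, 5.1, 10.0])
    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    analysis = DataClusterAnalysis()
    # Neighbouring points closer than 1.0 to each other and within 3.0 of the
    # cluster start are grouped together, giving three clusters here.
    clusters = analysis._clusterByData(times, values, (1.0, 3.0))
    print(analysis._clusterDataStat(clusters, np.mean))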
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-09 13:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webui', '0006_dataset_name'),
]
operations = [
migrations.AddField(
model_name='dataset',
name='complete',
field=models.BooleanField(default=True, help_text='Is each element present in each ranking of the dataset?'),
preserve_default=False,
),
migrations.AlterField(
model_name='dataset',
name='step',
field=models.IntegerField(blank=True, help_text='The number of steps used to generate the dataset, if pertinent', null=True),
),
]
|
import pytest
from JDI.jdi_assert.testing.assertion import Assert
from tests.jdi_uitests_webtests.main.enums.preconditions import Preconditions
from tests.jdi_uitests_webtests.main.page_objects.epam_jdi_site import EpamJDISite
from tests.jdi_uitests_webtests.test.init_tests import InitTests
@pytest.mark.web
class SmokeTableTests(InitTests):
table_as_text = (
"||X||Type|Now|Plans||\n"
+ "||1||Drivers|Selenium, Custom|JavaScript, Appium, WinAPI, Sikuli||\n"
+ "||2||Test Runner|TestNG, JUnit, Custom|MSTest, NUnit, Epam||\n"
+ "||3||Asserter|TestNG, JUnit, Custom|MSTest, NUnit, Epam||\n"
+ "||4||Logger|Log4J, TestNG log, Custom|Epam, XML/Json logging, Hyper logging||\n"
+ "||5||Reporter|Jenkins, Allure, Custom|EPAM Report portal, Serenity, TimCity, Hudson||\n"
+ "||6||BDD/DSL|Custom|Cucumber, Jbehave, Thucydides, SpecFlow||"
)
table = EpamJDISite.support_page.support_table
def setUp(self):
Preconditions.SUPPORT_PAGE.is_in_state()
def test_get_value(self):
Assert.assert_equal(self.table.get_value(), self.table_as_text)
def test_get_text(self):
Assert.assert_equal(self.table.get_text(), self.table_as_text)
def test_table_dimension(self):
Assert.assert_equal(str(self.table.columns.get_count()) + "/" + str(self.table.rows.get_count()), "3/6")
def test_table_column_headers(self):
Assert.assert_equal(", ".join(self.table.columns.get_headers()), "Type, Now, Plans")
def test_table_headers(self):
Assert.assert_equal(", ".join(self.table.get_headers()), "Type, Now, Plans")
def test_table_row_headers(self):
Assert.assert_equal(", ".join(self.table.rows.get_headers()), "1, 2, 3, 4, 5, 6")
def test_table_is_not_empty(self):
Assert.assert_false(self.table.is_empty())
|
"""A set of classes used to represent Mods in memory."""
# Disable warnings about 'too few class methods'
# pylint: disable=R0903
from typing import List, NamedTuple, Type, Union
def yaml_serializable(cls: Type[NamedTuple]):
"""Make a NamedTuple serializable by PyYAML."""
class Wrapper(object):
"""Provides the __dict__ property for annotated objects."""
def __init__(self, *args) -> None:
self.__wrapped = cls(*args)
self.__dict__ = dict(self.__wrapped._asdict())
return Wrapper
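# For example (illustrative values): after decoration, Media(["shot.png"], []) is an
# instance whose __dict__ is {"images": ["shot.png"], "videos": []}, giving PyYAML a
# plain mapping to serialize.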
@yaml_serializable
class Media(NamedTuple):
"""Container for media URLs."""
images: List[str] # Screenshots of the mod
videos: List[str] # Videos of the mod
@yaml_serializable
class Webpages(NamedTuple):
"""Container for the mod's homepages."""
steam: Union[str, None] # The URL of the mod's workshop page, or None
nexus: Union[str, None] # The URL of the mod's nexus page, or None
bethesda: Union[str, None] # The URL of the mod's bethesda page, or None
others: List[str] # The URLs for the mod's other pages (GH, AFKM, etc)
@yaml_serializable
class Requirement(NamedTuple):
"""Pointer to another mod needed by this mod."""
modid: Union[int, None] # The modid of the required mod, or None if N/A
name: str # The name of the required mod
optional: bool # If true, the mod will work without the requirement
@yaml_serializable
class Oldrim(NamedTuple):
"""Container for oldrim-specific data."""
is_oldrim: bool # If true, the mod has an oldrim version
webpages: Webpages
requirements: List[Requirement]
@yaml_serializable
class Sse(NamedTuple):
"""Container for SSE-specific data."""
is_sse: bool # If true, the mod has an SSE version
webpages: Webpages
requirements: List[Requirement]
console_compat: bool = False # If true, this mod can be used on a console
@yaml_serializable
class Mod(NamedTuple):
"""A collection of useful info about a mod."""
modid: int # The id of this mod (order it was added)
name: str # The name of this mod
description: str # A description of what this mod does
notes: List[str] # Installation notes and warnings
gems_category: Union[str, None] # This mod's category in GEMS, if any
media: Media
oldrim: Oldrim
sse: Sse
tags: List[str] = [] # A list of strings
deprecated: bool = False # Whether this mod has been replaced
entry_verified: bool = False # Whether this entry was checked by a human
@yaml_serializable
class MiniOldrim(NamedTuple):
"""A stripped-down Oldrim object."""
is_oldrim: bool
@yaml_serializable
class MiniSse(NamedTuple):
"""A stripped-down Sse object."""
is_sse: bool
console_compat: bool = False
@yaml_serializable
class MiniMod(NamedTuple):
"""A stripped-down mod for transfer over networks."""
modid: int
name: str
gems_category: Union[str, None]
oldrim: MiniOldrim
sse: MiniSse
tags: List[str]
|
"""Test the pyscript component."""
from ast import literal_eval
import asyncio
from datetime import datetime as dt
import time
import homeassistant.components.pyscript.trigger as trigger
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED, EVENT_STATE_CHANGED
from homeassistant.setup import async_setup_component
from tests.async_mock import mock_open, patch
async def setup_script(hass, notify_q, now, source):
"""Initialize and load the given pyscript."""
scripts = [
"/some/config/dir/pyscripts/hello.py",
]
with patch(
"homeassistant.components.pyscript.os.path.isdir", return_value=True
), patch(
"homeassistant.components.pyscript.glob.iglob", return_value=scripts
), patch(
"homeassistant.components.pyscript.open",
mock_open(read_data=source),
create=True,
), patch(
"homeassistant.components.pyscript.trigger.dt_now", return_value=now
):
assert await async_setup_component(hass, "pyscript", {})
#
# I'm not sure how to run the mock all the time, so just force the dt_now()
# trigger function to return the given list of times in now.
#
def return_next_time():
nonlocal now
if isinstance(now, list):
if len(now) > 1:
return now.pop(0)
return now[0]
return now
trigger.__dict__["dt_now"] = return_next_time
if notify_q:
async def state_changed(event):
var_name = event.data["entity_id"]
if var_name != "pyscript.done":
return
value = event.data["new_state"].state
await notify_q.put(value)
hass.bus.async_listen(EVENT_STATE_CHANGED, state_changed)
async def wait_until_done(notify_q):
"""Wait for the done handshake."""
return await asyncio.wait_for(notify_q.get(), timeout=4)
async def test_state_trigger(hass, caplog):
"""Test state trigger."""
notify_q = asyncio.Queue(0)
await setup_script(
hass,
notify_q,
[dt(2020, 7, 1, 10, 59, 59, 999999), dt(2020, 7, 1, 11, 59, 59, 999999)],
"""
from math import sqrt
seq_num = 0
#
# Instead of just a bare @time_trigger, do a real time trigger.
# The first value of now() causes func_startup_sync() to start almost
# immediately. The remaining values of now() are all an hour later at
# 11:59:59.999999, so this trigger won't happen again for another 24 hours.
#
@time_trigger("once(2020/07/01 11:00:00)")
def func_startup_sync():
global seq_num
seq_num += 1
log.info(f"func_startup_sync setting pyscript.done = {seq_num}")
pyscript.done = seq_num
@state_trigger("pyscript.f1var1 == '1'")
def func1(var_name=None, value=None):
global seq_num
seq_num += 1
log.info(f"func1 var = {var_name}, value = {value}")
pyscript.done = [seq_num, var_name, int(value), sqrt(1024)]
@state_trigger("pyscript.f1var1 == '1' or pyscript.f2var2 == '2'")
@state_active("pyscript.f2var3 == '3' and pyscript.f2var4 == '4'")
def func2(var_name=None, value=None):
global seq_num
seq_num += 1
log.info(f"func2 var = {var_name}, value = {value}")
pyscript.done = [seq_num, var_name, int(value), sqrt(4096)]
@event_trigger("fire_event")
def fire_event(**kwargs):
event.fire(kwargs["new_event"], arg1=kwargs["arg1"], arg2=kwargs["arg2"])
@event_trigger("test_event3", "arg1 == 20 and arg2 == 30")
def func3(trigger_type=None, event_type=None, **kwargs):
global seq_num
seq_num += 1
log.info(f"func3 trigger_type = {trigger_type}, event_type = {event_type}, event_data = {kwargs}")
pyscript.done = [seq_num, trigger_type, event_type, kwargs]
@event_trigger("test_event4", "arg1 == 20 and arg2 == 30")
def func4(trigger_type=None, event_type=None, **kwargs):
global seq_num
seq_num += 1
res = task.wait_until(event_trigger=["test_event4b", "arg1 == 25 and arg2 == 35"], timeout=10)
log.info(f"func4 trigger_type = {res}, event_type = {event_type}, event_data = {kwargs}")
pyscript.done = [seq_num, res, event_type, kwargs]
seq_num += 1
res = task.wait_until(state_trigger="pyscript.f4var2 == '2'", timeout=10)
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
pyscript.setVar1 = 1
pyscript.setVar2 = "var2"
state.set("pyscript.setVar3", {"foo": "bar"})
state.set("pyscript.setVar1", 1 + int(state.get("pyscript.setVar1")), {"attr1": 456, "attr2": 987})
seq_num += 1
res = task.wait_until(state_trigger="pyscript.f4var2 == '10'", timeout=10)
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res, pyscript.setVar1, pyscript.setVar1.attr1, state.get("pyscript.setVar1.attr2"), pyscript.setVar2, state.get("pyscript.setVar3")]
seq_num += 1
#
# now() returns 1usec before 2020/7/1 12:00:00, so trigger right
# at noon
#
res = task.wait_until(time_trigger="once(2020/07/01 12:00:00)", timeout=10)
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
seq_num += 1
#
# this should pick up the trigger interval at noon
#
res = task.wait_until(time_trigger="period(2020/07/01 11:00, 1 hour)", timeout=10)
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
seq_num += 1
#
# cron triggers at 10am, 11am, noon, 1pm, 2pm, 3pm, so this
# should trigger at noon.
#
res = task.wait_until(time_trigger="cron(0 10-15 * * *)", timeout=10)
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
seq_num += 1
#
# also add some month and day ranges; should still trigger at noon
# on 7/1.
#
res = task.wait_until(time_trigger="cron(0 10-15 1-5 6,7 *)", timeout=10)
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
seq_num += 1
#
# make sure a short timeout works, for a trigger further out in time
# (7/5 at 3pm)
#
res = task.wait_until(time_trigger="cron(0 15 5 6,7 *)", timeout=1e-6)
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
seq_num += 1
#
# make sure a short timeout works when there are no other triggers
#
res = task.wait_until(timeout=1e-6)
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
seq_num += 1
#
    # make sure we return when there are no triggers and no timeout
#
res = task.wait_until()
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
seq_num += 1
#
    # make sure we return when there are only past triggers and no timeout
#
res = task.wait_until(time_trigger="once(2020/7/1 11:59:59.999)")
log.info(f"func4 trigger_type = {res}")
pyscript.done = [seq_num, res]
#
# create a run-time exception
#
no_such_function("xyz")
""",
)
seq_num = 0
seq_num += 1
# fire event to start triggers, and handshake when they are running
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
assert literal_eval(await wait_until_done(notify_q)) == seq_num
seq_num += 1
# initialize the trigger and active variables
hass.states.async_set("pyscript.f1var1", 0)
hass.states.async_set("pyscript.f2var2", 0)
hass.states.async_set("pyscript.f2var3", 0)
hass.states.async_set("pyscript.f2var4", 0)
# try some values that shouldn't work, then one that does
hass.states.async_set("pyscript.f1var1", 0)
hass.states.async_set("pyscript.f1var1", "string")
hass.states.async_set("pyscript.f1var1", -1)
hass.states.async_set("pyscript.f1var1", 1)
assert literal_eval(await wait_until_done(notify_q)) == [
seq_num,
"pyscript.f1var1",
1,
32,
]
assert "func1 var = pyscript.f1var1, value = 1" in caplog.text
seq_num += 1
hass.states.async_set("pyscript.f2var3", 3)
hass.states.async_set("pyscript.f2var4", 0)
hass.states.async_set("pyscript.f2var2", 0)
hass.states.async_set("pyscript.f1var1", 0)
hass.states.async_set("pyscript.f1var1", 1)
assert literal_eval(await wait_until_done(notify_q)) == [
seq_num,
"pyscript.f1var1",
1,
32,
]
seq_num += 1
hass.states.async_set("pyscript.f2var4", 4)
hass.states.async_set("pyscript.f2var2", 2)
assert literal_eval(await wait_until_done(notify_q)) == [
seq_num,
"pyscript.f2var2",
2,
64,
]
assert "func2 var = pyscript.f2var2, value = 2" in caplog.text
seq_num += 1
hass.bus.async_fire("test_event3", {"arg1": 12, "arg2": 34})
hass.bus.async_fire("test_event3", {"arg1": 20, "arg2": 29})
hass.bus.async_fire("test_event3", {"arg1": 12, "arg2": 30})
hass.bus.async_fire(
"fire_event", {"new_event": "test_event3", "arg1": 20, "arg2": 30}
)
assert literal_eval(await wait_until_done(notify_q)) == [
seq_num,
"event",
"test_event3",
{"arg1": 20, "arg2": 30},
]
seq_num += 1
hass.states.async_set("pyscript.f4var2", 2)
hass.bus.async_fire("test_event4", {"arg1": 20, "arg2": 30})
t_now = time.monotonic()
while notify_q.empty() and time.monotonic() < t_now + 4:
hass.bus.async_fire("test_event4b", {"arg1": 15, "arg2": 25})
hass.bus.async_fire("test_event4b", {"arg1": 20, "arg2": 25})
hass.bus.async_fire("test_event4b", {"arg1": 25, "arg2": 35})
await asyncio.sleep(1e-3)
trig = {
"trigger_type": "event",
"event_type": "test_event4b",
"arg1": 25,
"arg2": 35,
}
assert literal_eval(await wait_until_done(notify_q)) == [
seq_num,
trig,
"test_event4",
{"arg1": 20, "arg2": 30},
]
seq_num += 1
# the state_trigger wait_until should succeed immediately, since the expr is true
assert literal_eval(await wait_until_done(notify_q)) == [
seq_num,
{"trigger_type": "state"},
]
seq_num += 1
# now try a few other values, then the correct one
hass.states.async_set("pyscript.f4var2", 4)
hass.states.async_set("pyscript.f4var2", 2)
hass.states.async_set("pyscript.f4var2", 10)
trig = {
"trigger_type": "state",
"var_name": "pyscript.f4var2",
"value": "10",
"old_value": "2",
}
result = literal_eval(await wait_until_done(notify_q))
assert result[0] == seq_num
assert result[1] == trig
assert result[2:5] == ["2", 456, 987]
assert hass.states.get("pyscript.setVar1").state == "2"
assert hass.states.get("pyscript.setVar1").attributes == {
"attr1": 456,
"attr2": 987,
}
assert hass.states.get("pyscript.setVar2").state == "var2"
assert literal_eval(hass.states.get("pyscript.setVar3").state) == {"foo": "bar"}
#
    # check for the four time triggers, two timeouts and two none results
#
for trig_type in ["time"] * 4 + ["timeout"] * 2 + ["none"] * 2:
seq_num += 1
assert literal_eval(await wait_until_done(notify_q)) == [
seq_num,
{"trigger_type": trig_type},
]
assert "name 'no_such_function' is not defined" in caplog.text
|
from django.urls import path, include
from django.contrib.auth import views as auth_views
from . import views
app_name = "users"
urlpatterns = [
path("register", views.register, name="register"),
path("update_profile", views.update_profile, name="update_profile")
]
|
import pandas as pd
from pandas.core.frame import DataFrame
from typing import Tuple
def get_user_secondary_df(name: str, value_column: str) -> DataFrame:
"""
Args:
name (str):
value_column (str):
Returns:
DataFrame:
"""
df = pd.read_csv("data/user" + name + ".csv", encoding='utf-8')
# create list of columns
columns = ['userID'] + list("U" + name + "_" + df[value_column].unique())
new_df = pd.DataFrame(columns=columns)
    # cast the indicator columns to int
    for column in new_df.columns[1:]:
        new_df[column] = new_df[column].astype(int)
# iterate over every row
for user in df['userID'].unique():
# create row with user ID and zero values in other columns
row = [user] + [0] * (len(columns) - 1)
# append row to dataframe
new_df = new_df.append(pd.DataFrame([row], columns=columns), ignore_index=True)
        # mark every value (e.g. cuisine) that this user has or likes
for value in df.loc[df.userID == user][value_column]:
new_df.at[new_df.index.max(), "U" + name + "_" + value] = 1
return new_df
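# For example, get_user_secondary_df('cuisine', 'Rcuisine') reads data/usercuisine.csv and
# returns one row per userID with a Ucuisine_<value> indicator column for each cuisine.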
def get_users() -> DataFrame:
""" Get users dataframe with respective data.
Returns:
Dataframe: Expected data.
"""
df = pd.read_csv("data/userprofile.csv", encoding='utf-8')
# drop useless columns
df.drop(columns=['color', 'interest', 'personality', 'height',
'activity', 'religion', 'ambience'],
inplace=True)
user_ids = set()
# remove '?' values
for column in df.columns:
if df[column].dtype.name == 'object':
for user_id in df.loc[df[column] == "?"].userID:
user_ids.add(user_id)
df = df[df[column] != '?']
df.smoker = df.smoker.replace({'true': 1, 'false': 0})
# select categorical column names
categorical_columns = [column for column in df.columns
if df[column].dtype.name == 'object'
if column not in ['userID', 'smoker']]
# replace categorical columns with one hot encoding
for column_name in categorical_columns:
dummies = pd.get_dummies(df[column_name])
for dummy_column_name in dummies.columns:
df[column_name + "_" + dummy_column_name] = dummies[dummy_column_name]
df.drop(columns=[column_name], inplace=True)
df_cuisine = get_user_secondary_df('cuisine', 'Rcuisine')
df_payment = get_user_secondary_df('payment', 'Upayment')
# merge dataframes on user ID
new_df = df.merge(df_payment, on='userID', how='inner') \
.merge(df_cuisine, on='userID', how='inner')
return new_df
|
#!/usr/bin/env python
import array
import os
import sys
from optparse import OptionParser
import OpenImageIO as oiio  # Python binding module name assumed; used below as "oiio"
# Parse the options the user provided from the command line
def option_parser():
parser = OptionParser()
parser.add_option("-v", action="store_true", dest="verbose", default=False)
parser.add_option("--inplace", action="store_true", dest="inplace", default=False)
parser.add_option("-d", dest="data_format_name", default="")
parser.add_option("-g", type="float", dest="gammaval", default=1.0)
parser.add_option("--sRGB", action="store_true", dest="sRGB", default=False)
parser.add_option("--tile", nargs=3, dest="tile")
parser.add_option("--scanline", action="store_true", dest="scanline", default=False)
parser.add_option("--separate", action="store_true", dest="separate", default=False)
parser.add_option("--contig", action="store_true", dest="contig", default=False)
parser.add_option("--compression", dest="compression")
parser.add_option("--quality", type="int", dest="quality", default = -1)
parser.add_option("--no-copy-image", action="store_true", dest="no_copy", default=False)
parser.add_option("--adjust-time", action="store_true", dest="adjust_time", default=False)
parser.add_option("--caption", dest="caption", default=None)
parser.add_option("-k", "--keyword", action="append", dest="keywords")
parser.add_option("--clear-keywords", action="store_true", default=False)
parser.add_option("--attrib", nargs=2, action="append", dest="attribs")
parser.add_option("--orientation", type="int", dest="orientation", default = 0)
parser.add_option("--rotcw", action="store_true", dest="rotcw", default=False)
parser.add_option("--rotccw", action="store_true", dest="rotccw", default=False)
parser.add_option("--rot180", action="store_true", dest="rot180", default=False)
parser.add_option("--plugin-path", dest="path", default="")
# FIXME: I suppose there should be a way to enter input/output files without
# having to specify an option, like "python iconvert.py -g 0.9 input.jpg output.jpg"
    # However, I could not find it in the docs, so I've set it up so that the
    # user has to put -i and -o before the input/output file names.
parser.add_option("-i", action="append", dest="input_files", default=[])
parser.add_option("-o", action="append", dest="output_files", default=[])
(options, args) = parser.parse_args()
if len(options.input_files) > len(options.output_files) and not options.inplace:
print "Must have both an input and output filename specified"
return False
if len(options.input_files) == 0 and options.inplace:
print "Must have at least one filename specified"
return False
if (int(options.rotcw) + int(options.rotccw) + int(options.rot180) + \
(options.orientation>0)) > 1:
print "iconvert: more than one of --rotcw, --rotccw, --rot180, --orientation"
return False
if options.path == "":
print "OIIO plugin path not provided, assuming \"\""
return parser.parse_args()
def convert_files(in_file, out_file):
nocopy = options.no_copy
tempname = out_file
# Check whether the conversion is inplace.
if tempname == in_file:
try:
ext = out_file.rfind(".")
tempname += ".tmp" + out_file[ext:]
except:
print "Error: Output file does not have an extension"
# image input
inp = oiio.ImageInput.create(in_file, options.path)
if not inp:
msg = "Could not crete ImageInput for " + in_file
sys.exit(msg)
inspec = oiio.ImageSpec()
inp.open(in_file, inspec)
# image output
out = oiio.ImageOutput.create(tempname, options.path)
if not out:
msg = "Unable to create ImageOutput for " + out_file
sys.exit(msg)
# adjust spec
outspec = inspec
nocopy = adjust_spec(inp, inspec, outspec)
out.open(tempname, outspec, False)
# convert
if nocopy == False:
ok = out.copy_image(inp)
if not ok:
print "Error"
else:
arr = array.array("B", "\0" * inspec.image_bytes())
ok = inp.read_image(outspec.format, arr)
if not ok:
print "Error reading"
else:
ok = out.write_image(outspec.format, arr)
if not ok:
print "Error writing"
out.close()
inp.close()
    # if the conversion was --inplace, tempname differs from out_file and this is True
if out_file != tempname:
if ok:
# since it was inplace, in_file == out_file
# so we need to replace the original file with tempfile
os.remove(out_file)
os.rename(tempname, out_file)
else:
os.remove(tempname)
def adjust_spec(inp, inspec, outspec):
nocopy = options.no_copy
# the following line is from the original iconvert, but I'm not sure
# it is needed. It's already outspec = inspec, right?
#outspec.set_format(inspec.format)
    if options.data_format_name != "":
        data_format_name = options.data_format_name
        if data_format_name == "uint8":
outspec.set_format(oiio.BASETYPE.UINT8)
elif data_format_name == "int8":
outspec.set_format(oiio.BASETYPE.INT8)
elif data_format_name == "uint16":
outspec.set_format(oiio.BASETYPE.UINT16)
elif data_format_name == "int16":
outspec.set_format(oiio.BASETYPE.INT16)
elif data_format_name == "half":
outspec.set_format(oiio.BASETYPE.HALF)
elif data_format_name == "float":
outspec.set_format(oiio.BASETYPE.FLOAT)
elif data_format_name == "double":
outspec.set_format(oiio.BASETYPE.DOUBLE)
if outspec.format != inspec.format:
nocopy = True
if options.gammaval != outspec.gamma:
outspec.gamma = options.gammaval
nocopy = True
if options.sRGB:
outspec.linearity = oiio.sRGB
#ImageSpec.find_attribute() is not exposed to Python
#if inp.format_name() != "jpeg" or outspec.find_attribute("Exif:ColorSpace"):
#outspec.attribute("Exif:ColorSpace", 1)
# handling tiles is not exposed to Python
if options.tile:
        # --tile values arrive as strings from optparse, so cast them to int
        outspec.tile_width = int(options.tile[0])
        outspec.tile_height = int(options.tile[1])
        outspec.tile_depth = int(options.tile[2])
if options.scanline:
outspec.tile_width = 0
outspec.tile_height = 0
outspec.tile_depth = 0
if outspec.tile_width != inspec.tile_width or \
outspec.tile_height != inspec.tile_height or \
outspec.tile_depth != inspec.tile_depth:
nocopy = True
if options.compression:
outspec.attribute("compression", options.compression)
# 2nd argument should be exposed as default
if options.compression != inspec.get_string_attribute("compression", ""):
nocopy = True
    # FIXME: If quality is provided, the resulting image is larger than the
# input image, and it is always the same no matter what quality (1-100).
# (I suppose it uses the maximum possible value)
# Should a --compression method be provided if --quality is used?
if options.quality > 0:
outspec.attribute("CompressionQuality", options.quality)
# the 2nd argument should be exposed as default (in ImageSpec wrapper)
# FIXME: The default arg is supposed to be 0, and get_int_attribute always
# returns whatever is provided as the 2nd argument - 0 in this case.
# I can't find out what's wrong in the binding.
if options.quality != inspec.get_int_attribute("CompressionQuality", 0):
nocopy = True
if options.contig:
outspec.attribute("planarconfig", "contig")
if options.separate:
outspec.attribute("planarconfig", "separate")
if options.orientation >= 1:
outspec.attribute("Orientation", options.orientation)
else:
orientation = outspec.get_int_attribute("Orientation", 1)
if orientation >= 1 and orientation <= 8:
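            # (added note) cw maps each EXIF Orientation code to its value after one
            # 90-degree rotation step, so applying it one to three times gives the
            # 90/180/270-degree adjustments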
cw = [0, 6, 7, 8, 5, 2, 3, 4, 1]
if options.rotcw or options.rotccw or options.rot180:
orientation = cw[orientation]
            if options.rotccw or options.rot180:
orientation = cw[orientation]
if options.rotccw:
orientation = cw[orientation]
outspec.attribute("Orientation", orientation)
if options.caption != None:
outspec.attribute("ImageDescription", options.caption)
if options.clear_keywords == True:
outspec.attribute("Keywords", "")
# this looks a lot simpler than in c++ :)
    if options.keywords != None:
        oldkw = outspec.get_string_attribute("Keywords", "")
        newkw = oldkw
        for keyword in options.keywords:
            # avoid a leading "; " when there were no existing keywords
            newkw = newkw + "; " + keyword if newkw else keyword
        outspec.attribute("Keywords", newkw)
if options.attribs:
for i in options.attribs:
outspec.attribute(i[0], i[1])
return nocopy
# main
import OpenImageIO as oiio
import array
from optparse import OptionParser
import os
import sys
(options, args) = option_parser()
if options.inplace:
for image in options.input_files:
if convert_files(image, image) == False:
sys.exit("Conversion failed")
else:
for i in range(len(options.input_files)):
if convert_files(options.input_files[i], options.output_files[i]) == False:
sys.exit("Conversion failed")
|
import logging
from PySide2 import QtWidgets
from channel_box_plus import widgets
from channel_box_plus import utils
log = logging.getLogger(__name__)
def execute():
"""
Add the search interface and colouring functionality to Maya's main
channel box. If channelBoxPlus is already installed a RuntimeError
exception will be thrown. A threshold can be set, this threshold
determines when the attributes should change colour. the higher the
threshold the more the 2 attributes will have to match up to stay the
same colour.
:raises RuntimeError: When the channel box plus is already installed.
"""
# get widgets
channel_box = utils.get_channel_box()
parent = channel_box.parent()
parent_layout = parent.layout()
parent_layout.setSpacing(0)
# validate search widget
    for i in range(parent_layout.count()):
        item = parent_layout.itemAt(i)
if isinstance(item.widget(), widgets.SearchWidget):
raise RuntimeError("channel-box-plus has already been installed.")
# initialize search widget
search_widget = widgets.SearchWidget(parent)
# add search widget to layout
if isinstance(parent_layout, QtWidgets.QLayout):
item = parent_layout.itemAt(0)
widget = item.widget()
parent_layout.removeWidget(widget)
parent_layout.addWidget(search_widget)
parent_layout.addWidget(widget)
else:
parent_layout.insertWidget(0, search_widget)
log.info("channel-box-plus installed successfully.")
|
word = "banana"
check = "a"
print(word.count(check))
|
import csv
from django.core.management.base import BaseCommand
from gsr_booking.models import GSR
class Command(BaseCommand):
def handle(self, *args, **kwargs):
with open("gsr_booking/data/gsr_data.csv") as data:
reader = csv.reader(data)
for i, row in enumerate(reader):
if i == 0:
continue
# collects room information from csv
lid, gid, name, service = row
# gets image from s3 given the lid and gid
# TODO: fix image url!
image_url = (
f"https://s3.us-east-2.amazonaws.com/labs.api/gsr/lid-{lid}-gid-{gid}.jpg"
)
kind = GSR.KIND_WHARTON if service == "wharton" else GSR.KIND_LIBCAL
GSR.objects.create(lid=lid, gid=gid, name=name, kind=kind, image_url=image_url)
self.stdout.write("Uploaded GSRs!")
|
import pr2hub
import discord
import math
import re
from discord.ext import commands
from cogs.utils import exp
class PR2():
def __init__(self, bot):
self.bot = bot
self.quoted = re.compile(r"\"(.+)\"")
@commands.command(description="returns player information",
aliases=["pi", "view"],
brief="player_name")
async def player_info(self, *, player_name : str):
#region SANITY
if len(player_name) > 20:
await self.bot.say("Parameter too long: `player_name`")
return
#endregion
player = None
match_obj = self.quoted.match(player_name)
try:
if match_obj:
print("quoted")
                player = pr2hub.get_player_info(match_obj.group(1))
else:
player = pr2hub.get_player_info(player_name)
except pr2hub.PR2HubError as e:
await self.bot.say(str(e))
return
description = f"**Name:** {player.name}\n"
description += f"**Status:** {player.status}\n"
description += f"**Group:** {player.group}\n"
description += f"**Guild:** {player.guild_name}\n"
description += f"**Rank:** {player.rank}\n"
description += f"**Hats:** {player.hats}\n"
description += f"**Joined:** {player.register_date}\n"
description += f"**Active:** {player.login_date}"
embed = discord.Embed(title="-- Player Info --", description=description)
await self.bot.say(embed=embed)
@commands.command(description="returns guild information",
aliases=["gi", "guild"],
brief="guild_name")
async def guild_info(self, *, guild_name : str):
#region SANITY
if len(guild_name) > 20:
await self.bot.say("Parameter too long: `guild_name`")
return
#endregion
guild = None
try:
guild = pr2hub.get_guild_info(guild_name, False)
except pr2hub.PR2HubError as e:
await self.bot.say(str(e))
return
description = f"**Name:** {guild.name}\n"
description += "**GP Today:** {:,}\n".format(int(guild.gp_today))
description += "**GP Total:** {:,}\n".format(int(guild.gp_total))
description += f"**Members:** {guild.member_count} ({guild.active_count} active)\n"
description += f"**Creation Date:** {guild.creation_date}\n"
description += f"**Prose:** {guild.note}"
embed = discord.Embed(title="-- Guild Info --", description=description)
embed.set_thumbnail(url=pr2hub.emblems.url + guild.emblem)
await self.bot.say(embed=embed)
@commands.command(description="returns the names of all members in a guild",
aliases=["gm", "guildm"],
brief="guild_name")
async def guild_members(self, *, guild_name : str):
#region SANITY
if len(guild_name) > 20:
await self.bot.say("Parameter too long: `guild_name`")
return
#endregion
guild = None
try:
guild = pr2hub.get_guild_info(guild_name, True)
except pr2hub.PR2HubError as e:
await self.bot.say(str(e))
return
member_count_is_even = guild.member_count % 2 == 0
half_member_count = int(guild.member_count / 2)
guild_member_names = []
value1 = []
value2 = []
# create a list of just the member names
for member in guild.members:
guild_member_names.append(member.name)
if member_count_is_even:
value1 = guild_member_names[:half_member_count]
value2 = guild_member_names[half_member_count:half_member_count*2]
else:
value1 = guild_member_names[:half_member_count+1]
value2 = guild_member_names[half_member_count+1:half_member_count*2+1]
embed = discord.Embed(title=f"-- Guild Members ({len(guild_member_names)}) --")
embed.add_field(name="1", value="\n".join(value1), inline=True)
if guild.member_count > 1:
embed.add_field(name="2", value="\n".join(value2), inline=True)
await self.bot.say(embed=embed)
@commands.command(description="calculates experience required to reach a specified rank",
aliases=["xp"],
brief="from [to [exp_per_day=720]]")
async def exp(self, _from : int, to : int=None, exp_per_day=720):
if to == None:
to = _from + 1
#region SANITY
if _from > 1000 or _from < 0:
await self.bot.say("Parameter out of range: `from`")
return
if to > 1000 or to < 0:
await self.bot.say("Parameter out of range: `to`")
return
if exp_per_day > 10000 or exp_per_day < 0:
await self.bot.say("Parameter out of range: `exp_per_day`")
return
#endregion
_exp = exp.calculate(_from, to)
days = int(_exp/(exp_per_day*1000))
_exp = "{:,}".format(_exp)
embed = discord.Embed(title="-- Experience Needed --",
description=f"{_from} -> {to} = {_exp}\nroughly {days} days")
await self.bot.say(embed=embed)
@commands.command(description="returns info of every server",
aliases=["hh", "status"])
async def server_info(self):
servers = None
try:
servers = pr2hub.get_servers_info()
except pr2hub.PR2HubError as e:
await self.bot.say(str(e))
return
description = ""
for server in servers:
line = ""
line += server.name
if server.status == "down":
line += " (down)"
else:
line += f" ({server.population} online)"
if server.is_happy_hour:
line = f"**!! {line}**"
line += "\n"
description += line
embed = discord.Embed(title="-- Server Info --", description=description)
await self.bot.say(embed=embed)
def setup(bot):
bot.add_cog(PR2(bot))
|
"""
Helper functions for parsing python code, as required by ScriptTask
"""
import ast
from itertools import chain
import re
import sys
from typing import Container, Iterator, List, Tuple
from ..exceptions import ScriptParseError
_BUILTINS_WHITELIST = {
"abs",
"all",
"any",
"ascii",
"bin",
"chr",
"dir",
"divmod",
"environ",
"format",
"getattr",
"hasattr",
"hex",
"iter",
"len",
"max",
"min",
"next",
"oct",
"ord",
"pow",
"repr",
"round",
"sorted",
"sum",
"None",
"Ellipsis",
"False",
"True",
"bool",
"memoryview",
"bytearray",
"bytes",
"complex",
"dict",
"enumerate",
"filter",
"float",
"frozenset",
"int",
"list",
"map",
"range",
"reversed",
"set",
"slice",
"str",
"tuple",
"type",
"zip",
}
Substitution = Tuple[Tuple[int, int], str]
def resolve_function_call(
source: str, arguments: Container[str], args_prefix: str = "__args."
):
"""
Validate function call and substitute references to arguments with their namespaced
counterparts (e.g. `my_arg` => `args.my_arg`).
"""
call_node = parse_and_validate(source)
substitutions: List[Substitution] = []
# Collect all the variables
name_nodes: Iterator[ast.Name] = chain(
(
node
for arg in call_node.args
for node in ast.walk(arg)
if isinstance(node, ast.Name)
),
(
node
for kwarg in call_node.keywords
for node in ast.walk(kwarg.value)
if isinstance(node, ast.Name)
),
)
for node in name_nodes:
if node.id in _BUILTINS_WHITELIST:
# builtin values have precedence over unqualified args
continue
if node.id in arguments:
substitutions.append(
(_get_name_node_abs_range(source, node), args_prefix + node.id)
)
else:
raise ScriptParseError(
"Invalid variable reference in script: "
+ _get_name_source_segment(source, node)
)
# Prefix references to arguments with args_prefix
return _apply_substitutions(source, substitutions)
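# Usage sketch for resolve_function_call (hypothetical names, for illustration):
#   resolve_function_call("greet(name, shout=loud)", {"name", "loud"})
#   => "greet(__args.name, shout=__args.loud)"
# An identifier that is neither a whitelisted builtin nor a declared argument,
# e.g. "greet(unknown)", raises ScriptParseError.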
def parse_and_validate(source: str):
"""
    Parse the given source into an ast, validate that it consists of a single function
call, and return the Call node.
"""
try:
module = ast.parse(source)
except SyntaxError as error:
raise ScriptParseError(f"Invalid script content: {source}") from error
if len(module.body) != 1:
raise ScriptParseError(
f"Expected a single python expression, instead got: {source}"
)
first_statement = module.body[0]
if not isinstance(first_statement, ast.Expr):
raise ScriptParseError(f"Expected a function call, instead got: {source}")
call_node = first_statement.value
if not isinstance(call_node, ast.Call):
raise ScriptParseError(f"Expected a function call, instead got: {source}")
node = call_node.func
while isinstance(node, ast.Attribute):
node = node.value
if not isinstance(node, ast.Name):
raise ScriptParseError(f"Invalid function reference in: {source}")
return call_node
def _apply_substitutions(content: str, subs: List[Substitution]):
"""
Returns a copy of content with all of the substitutions applied.
Uses a single pass for efficiency.
"""
cursor = 0
segments: List[str] = []
for ((start, end), replacement) in sorted(subs, key=lambda x: x[0][0]):
in_between = content[cursor:start]
segments.extend((in_between, replacement))
cursor += len(in_between) + (end - start)
segments.append(content[cursor:])
return "".join(segments)
# This pattern matches the sequence of chars from the beginning of the string that are
# *probably* a valid identifier
IDENTIFIER_PATTERN = r"[^\s\!-\/\:-\@\[-\^\{-\~`]+"
def _get_name_node_abs_range(source: str, node: ast.Name):
"""
Find the absolute start and end offsets of the given name node in the source.
"""
source_lines = re.findall(r".*?(?:\r\n|\r|\n)", source + "\n")
prev_lines_offset = sum(len(line) for line in source_lines[: node.lineno - 1])
own_line_offset = len(
source_lines[node.lineno - 1].encode()[: node.col_offset].decode()
)
total_start_chars_offset = prev_lines_offset + own_line_offset
name_content = re.match( # type: ignore
IDENTIFIER_PATTERN, source[total_start_chars_offset:]
).group()
while not name_content.isidentifier() and name_content:
name_content = name_content[:-1]
return (total_start_chars_offset, total_start_chars_offset + len(name_content))
def _get_name_source_segment(source: str, node: ast.Name):
"""
Before python 3.8 the ast module didn't allow for easily identifying the source
segment of a node, so this function provides this functionality specifically for
name nodes as needed here.
The fallback logic is specialised for name nodes which cannot span multiple lines
and must be valid identifiers. It is expected to be correct in all cases, and
performant in common cases.
"""
    if sys.version_info >= (3, 8):
return ast.get_source_segment(source, node) # type: ignore
partial_result = (
re.split(r"(?:\r\n|\r|\n)", source)[node.lineno - 1]
.encode()[node.col_offset :]
.decode()
)
# The name probably extends to the first ascii char outside of [a-zA-Z\d_]
# regex will always match with valid arguments to this function
partial_result = re.match(IDENTIFIER_PATTERN, partial_result).group() # type: ignore
# This bit is a nasty hack, but probably always gets skipped
while not partial_result.isidentifier() and partial_result:
partial_result = partial_result[:-1]
return partial_result
|
from flask import Flask, render_template_string
from sucuri import rendering
app = Flask(__name__)
@app.route("/")
def index():
template = rendering.template('template.suc',{"text": "Hello! I'm here!", "var":[1, 2, 3, 4]})
return render_template_string(template)
|
# Author: Qianru Zhou
# Email: zhouqr333@126.com
# Magic, do not touch!!
""" generate immature detector, a file with random 96 bits binary strings
"""
from random import randint
def reduce_duplicate(inputPath, outputPath):
""" delete the duplicate strings in the log for efficiency
"""
    # track seen lines in a set so deduplication stays O(n) even for huge logs
    seen = set()
    patterns = []
    with open(inputPath, 'r') as f:
        for line in f:
            if line not in seen:
                seen.add(line)
                patterns.append(line)
with open(outputPath, 'w+') as out:
for pattern in patterns:
out.write(pattern)
# another way to do that
#tmp = open('/Users/silvia/Documents/code/my/my_pcap/log').readlines()
#set(tmp)
#with open('/Users/silvia/Documents/code/my/my_pcap/uniLog', 'w+') as f:
#for item in tmp:
#f.write(item)
def create_antibody():
""" generate 96 bits long binary strings randomly
"""
s = list(''.zfill(96))
for i in range(0,96):
s[i]=str(randint(0,9)%2)
return ''.join(s)
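# Example (random output, shown only to illustrate the format):
#   create_antibody() -> '0110...'  # a 96-character string of '0'/'1' digits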
if __name__ == '__main__':
# with open('/Users/silvia/Documents/code/my/my_pcap/log/raw_immature_detector', 'w+') as f:
# for _ in range(5000000):
# f.write(create_antibody()+'\n')
reduce_duplicate('/Users/silvia/Documents/code/my/my_pcap/log/raw_immature_detector', \
'/Users/silvia/Documents/code/my/my_pcap/log/immature_detector')
|
DEEPREACH_DIR = '/'.join(__file__.split('/')[:-2])
environment = f'{DEEPREACH_DIR}/environment.yml'
not_installed = f'{DEEPREACH_DIR}/change_env_file/packages_not_found.txt'
not_in_osx = f'{DEEPREACH_DIR}/change_env_file/not_in_osx.txt'
desired_env = f'{DEEPREACH_DIR}/new_environment.yml'
def remove_build(s: str) -> str:
'''
Removes the build from a string representing a package
'''
new_s = s[:s.rfind('=')]
if '=' not in new_s: return s
return new_s
def make_fuzzy(s: str) -> str:
'''
Changes the == to an = in the package specification
'''
return s.replace('==', '=', 1)
def make_exact(s: str) -> str:
'''
    Changes the first = back to an == in the package specification
'''
return s.replace('=', '==', 1)
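# Examples (hypothetical package strings, for illustration only):
#   remove_build('numpy=1.21.2=py39h1234')  -> 'numpy=1.21.2'
#   make_fuzzy('numpy==1.21.2')             -> 'numpy=1.21.2'
#   make_exact('numpy=1.21.2')              -> 'numpy==1.21.2'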
if __name__ == '__main__':
with open(not_installed) as ni:
not_installed_list = ni.read().split('\n')
with open(not_in_osx) as nio:
not_in_osx_list = nio.read().split('\n')
with open(environment) as e:
env_list = e.read().split('\n')
not_installed_list = [make_fuzzy(package) for package in not_installed_list]
new_env_list = env_list[:]
matches = 0
indices = {val: index for index, val in enumerate(env_list)}
for package in not_installed_list:
index = indices.get(package, -1)
if index != -1:
new_env_list[index] = remove_build(new_env_list[index])
matches += 1
else:
print('The following package was not found in the environment.yml file:', package)
if matches == len(not_installed_list):
print(f'All {matches} packages had their build strings removed!')
else:
print(f'There were {matches} packages out of {len(not_installed_list)} whose build string was removed.')
matches = 0
for package in not_in_osx_list:
for e_package in new_env_list:
if package in e_package:
new_env_list.remove(e_package)
new_env_list.append(' ' + make_exact(package))
matches += 1
break
else:
print('The following package was not found in the environment.yml file:', package)
if matches == len(not_in_osx_list):
print(f'All {matches} packages were moved under the pip section!')
else:
print(f'There were {matches} packages out of {len(not_in_osx_list)} which were moved under the pip section.')
with open(desired_env, 'w') as d:
d.write('\n'.join(new_env_list))
# with open(not_installed) as n:
# not_installed_list = n.read().split('\n')
# with open(environment) as e:
# environment_list = e.read().split('\n')
# not_installed_list = [make_fuzzy(package) for package in not_installed_list]
# new_env_list = environment_list[:]
# matches = 0
# pip_location = 197
# assert 'pip:' in environment_list[pip_location]
# not_pip = set(environment_list[:pip_location])
# for package in not_installed_list:
# if package in not_pip:
# new_env_list.remove(package)
# matches += 1
# new_env_list.append(' ' + remove_build(make_exact(package)))
# else:
# print('The following package was not found in the environment.yml file:', package)
# with open(desired_env, 'w') as d:
# d.write('\n'.join(new_env_list))
# if matches == len(not_installed_list):
# print(f'All {matches} packages were successfully moved under the \'pip\' section')
# else:
# print(f'There were {matches} packages out of {len(not_installed_list)} that were moved under the \'pip\' section.')
# input()
|
#-*-coding:utf-8-*-
import numpy as np
import pandas as pd
import time
from bayes_smoothing import *
from sklearn.preprocessing import LabelEncoder
import copy
def roll_browse_fetch(df, column_list):
print("==========================roll_browse_fetch ing==============================")
df = df.sort('context_timestamp')
df['tmp_count'] = df['status']
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
df['%s_browse' %c] = df.groupby(pair)['tmp_count'].cumsum()
del df['tmp_count']
return df
def roll_click_fetch(df, column_list):
print("==========================roll_click_fetch ing==============================")
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
df['%s_click' %c] = df.groupby(pair)['is_trade'].cumsum()
df['%s_click' %c] = df['%s_click' %c]-df['is_trade']
return df
def roll_rate_fetch(df, column_list):
df = roll_browse_fetch(df,column_list)
df = roll_click_fetch(df,column_list)
print("==========================roll_rate_fetch ing==============================\n")
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_rate' %c] = bs_utilize(df['%s_browse' %c], df['%s_click' %c])
# del df['%s_browse' %c]
return df
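# Note (added explanation): for every key (or key tuple) in column_list,
# roll_rate_fetch builds a running impression count (*_browse, cumulative sum of
# 'status' including the current row), a running conversion count (*_click,
# cumulative sum of 'is_trade' excluding the current row), and a smoothed
# conversion rate (*_rate) computed by bs_utilize from bayes_smoothing.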
#=================================== conversion rate by day ==============================
def roll_browse_day(df, column_list):
df = df.sort('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('day')
df_temp["{}_day_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_day_browse".format(c)] = df_temp["{}_day_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_day(df,column_list):
df = df.sort('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('day')
df_temp["{}_day_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_day_click".format(c)] = df_temp["{}_day_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_day(df,column_list):
print("==========================roll_rate_day ing==============================")
df = roll_browse_day(df,column_list)
df =roll_click_day(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_day_rate' %c] = bs_utilize(df['%s_day_browse' %c], df['%s_day_click' %c])
# del df['%s_day_browse'%c]
# del df['%s_day_click'%c]
return df
#=================================== conversion rate by day and hour ==============================
def roll_browse_day_hour(df, column_list):
df = df.sort('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
pair.append('hour')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('day')
pair_temp.remove('hour')
df_temp["{}_day_hour_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_day_hour_browse".format(c)] = df_temp["{}_day_hour_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_day_hour(df,column_list):
df = df.sort('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('day')
pair.append('hour')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('day')
pair_temp.remove('hour')
df_temp["{}_day_hour_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_day_hour_click".format(c)] = df_temp["{}_day_hour_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_day_hour(df,column_list):
print("==========================roll_rate_day ing==============================")
df = roll_browse_day_hour(df,column_list)
df =roll_click_day_hour(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_day_hour_rate' %c] = bs_utilize(df['%s_day_hour_browse' %c], df['%s_day_hour_click' %c])
# del df['%s_day_browse'%c]
# del df['%s_day_click'%c]
return df
#=================================== conversion rate by hour ==============================
def roll_browse_hour(df, column_list):
df = df.sort('context_timestamp')
df['tmp_count'] = df['status']
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('hour')
df_temp = df.groupby(pair)['tmp_count'].agg({"browse_temp":np.sum}).reset_index()
pair_temp =copy.copy(pair)
pair_temp.remove('hour')
df_temp["{}_hour_browse".format(c)] = df_temp.groupby(pair_temp)["browse_temp"].cumsum()
df_temp["{}_hour_browse".format(c)] = df_temp["{}_hour_browse".format(c)] - df_temp['browse_temp']
del df_temp['browse_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair )
del df['tmp_count']
return df_data_temp
def roll_click_hour(df,column_list):
df = df.sort('context_timestamp')
df_data_temp =df.copy()
for c in column_list:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
pair.append('hour')
df_temp = df.groupby(pair)['is_trade'].agg({"click_temp":np.sum}).reset_index()
pair_temp = copy.copy(pair)
pair_temp.remove('hour')
df_temp["{}_hour_click".format(c)] = df_temp.groupby(pair_temp)["click_temp"].cumsum()
df_temp["{}_hour_click".format(c)] = df_temp["{}_hour_click".format(c)] - df_temp['click_temp']
del df_temp['click_temp']
df_data_temp = pd.merge(df_data_temp,df_temp,how = "left",on = pair)
return df_data_temp
def roll_rate_hour(df,column_list):
print("==========================roll_rate_hour ing==============================")
df = roll_browse_hour(df,column_list)
df =roll_click_hour(df,column_list)
for c in column_list:
if isinstance(c, (list, tuple)):
c = '_'.join(c)
df['%s_hour_rate' %c] = bs_utilize(df['%s_hour_browse' %c], df['%s_hour_click' %c])
return df
def label_encoding(df, columns):
for c in columns:
le = LabelEncoder()
df[c] = le.fit_transform(df[c])
return df
# # #---------------- statistical features -----------------
# def get_last_diff_statistic(data,col_list, n_last_diff):
# print("=======get_last_diff============\n")
# data_temp = data
# col_id = col_list[0],col_list[1]
# data = data.sort_values([col_id, 'timestamp'])
# data['next_id'] = data[col_id].shift(-1)
# data['next_actionTime'] = data.timestamp.shift(-1)
# data = data.loc[data.next_id == data[col_id]].copy()
# data['action_diff'] = data['next_actionTime'] - data['timestamp']
# if n_last_diff is not None:
# df_n_last_diff = data.groupby(col_id, as_index=False).tail(n_last_diff).copy()
# df_last_diff_statistic = df_n_last_diff.groupby(col_id, as_index=False).action_diff.agg({
# '{}_last_{}_action_diff_mean'.format(col_id,n_last_diff): np.mean,
# '{}_last_{}_action_diff_std'.format(col_id,n_last_diff): np.std,
# '{}_last_{}_action_diff_max'.format(col_id,n_last_diff): np.max,
# '{}_last_{}_action_diff_min'.format(col_id,n_last_diff): np.min
# })
# else:
# grouped_user = data.groupby(col_id, as_index=False)
# n_last_diff = 'all'
# df_last_diff_statistic = grouped_user.action_diff.agg({
# '{}_last_{}_action_diff_mean'.format(col_id,n_last_diff): np.mean,
# '{}_last_{}_action_diff_std'.format(col_id,n_last_diff): np.std,
# '{}_last_{}_action_diff_max'.format(col_id,n_last_diff): np.max,
# '{}_last_{}_action_diff_min'.format(col_id,n_last_diff): np.min
# })
# res_data = pd.merge(data_temp,df_last_diff_statistic,how="left",on = col_id)
# return res_data
# #----------------------- time features -----------------------
# # #-- time interval features
# def chafen(df):
# return pd.DataFrame(np.diff(df,axis = 0))
# def get_last_diff(data, col_list,n_last_diff):
# """获取最后 n_last_diff 个动作之间的时间间隔"""
# print("=======get_last_diff============\n")
# for col in col_list:
# col_sort = col.copy()
# col_sort.append('timestamp')
# data = data.sort_values(col_sort,ascending = False)
# data_temp = data.groupby(col)['timestamp'].apply(chafen).reset_index()
# data_temp.columns = [col[0],col[1],'level','time_gap']
# data_temp = data_temp.loc[data_temp.level<n_last_diff]
# data_temp['time_gap'] = -1*data_temp['time_gap']
# data_temp['level'] = str(col[0])+"_"+str(col[1])+"_last_time_gap"+ data_temp['level'].astype('str')
# data_temp = pd.pivot_table(data_temp,index=[col[0],col[1]],values='time_gap',columns='level').reset_index()
# res_data = pd.merge(data,data_temp,how="left",on = [col[0],col[1]])
# return res_data
#-- time interval features
def time_diff_feat(data,col_list):
print("get tiem diff...")
for col in col_list:
col_sort = copy.copy(col)
col_sort.append('timestamp')
data_temp = data.sort(col_sort,ascending = True)
data_temp['{}_{}_time_diff'.format(col[0],col[1])] = data_temp.groupby(col)['timestamp'].apply(lambda x:x.diff())
data['{}_{}_time_diff'.format(col[0],col[1])] = data_temp['{}_{}_time_diff'.format(col[0],col[1])].fillna(0)
return data
def CombinationFeature(data):
print("==============convert_data===============")
data['tm_hour'] = data['hour'] + data['min']/60
data['tm_hour_sin'] = data['tm_hour'].map(lambda x:np.sin((x-12)/24*2*np.pi))
data['tm_hour_cos'] = data['tm_hour'].map(lambda x:np.cos((x-12)/24*2*np.pi))
data_time=data[['user_id','day','hour','min']]
user_query_day = data.groupby(['user_id', 'day']).size().reset_index().rename(columns={0: 'user_query_day'})
user_query_day_hour = data.groupby(['user_id', 'day', 'hour']).size().reset_index().rename(columns={0: 'user_query_day_hour'})
user_query_day_hour_min = data.groupby(['user_id', 'day', 'hour','min']).size().reset_index().rename(columns={0: 'user_query_day_hour_min'})
user_query_day_hour_min_sec = data.groupby(['user_id', 'day', 'hour','min','sec']).size().reset_index().rename(columns={0: 'user_query_day_hour_min_sec'})
user_day_hourmin_mean= data_time.groupby(['user_id', 'day']).mean().reset_index().rename(columns={'hour': 'mean_hour','min':'mean_minuite'})
user_day_hourmin_std= data_time.groupby(['user_id', 'day']).std().reset_index().rename(columns={'hour': 'std_hour','min':'std_minuite'})
user_day_hourmin_max= data_time.groupby(['user_id', 'day']).max().reset_index().rename(columns={'hour': 'max_hour','min':'max_minuite'})
user_day_hourmin_min= data_time.groupby(['user_id', 'day']).min().reset_index().rename(columns={'hour': 'min_hour','min':'min_minuite'})
#-------merge-----
data = pd.merge(data, user_query_day, 'left', on=['user_id', 'day'])
data = pd.merge(data, user_query_day_hour, 'left',on=['user_id', 'day', 'hour'])
data = pd.merge(data, user_query_day_hour_min, 'left',on=['user_id', 'day', 'hour','min'])
data = pd.merge(data, user_query_day_hour_min_sec, 'left',on=['user_id', 'day', 'hour','min','sec'])
data = pd.merge(data, user_day_hourmin_mean, 'left',on=['user_id','day'])
data = pd.merge(data, user_day_hourmin_std, 'left',on=['user_id','day'])
data = pd.merge(data, user_day_hourmin_max, 'left',on=['user_id','day'])
data = pd.merge(data, user_day_hourmin_min, 'left',on=['user_id','day'])
#==============================click_feat================================
data_temp = data.copy()
columns_click = [
        #-- single key --
['user_id'],
['item_id'],
['context_id'],
['shop_id'],
['item_brand_id'],
['item_city_id'],
['context_page_id'],
['item_category_2'],
['item_property_list_1'],
['item_property_list_2'],
['item_property_list_3'],
['predict_category_property_A'],
['predict_category_property_B'],
['predict_category_property_C'],
['predict_category_property_A_1'],
['predict_category_property_B_1'],
['predict_category_property_C_1'],
['predict_category_property_A_2'],
['predict_category_property_B_2'],
['predict_category_property_C_2'],
        #-- with day added --
['user_id','day','item_id'],
['user_id','day','context_id'],
['user_id','day','shop_id'],
['user_id','day','item_brand_id'],
['user_id','day','item_city_id'],
['user_id','day','context_page_id'],
['user_id','day','item_category_2'],
['user_id','day','item_property_list_1'],
['user_id','day','item_property_list_2'],
['user_id','day','item_property_list_3'],
['user_id','day','predict_category_property_A'],
['user_id','day','predict_category_property_B'],
['user_id','day','predict_category_property_C'],
['user_id','day','predict_category_property_A_1'],
['user_id','day','predict_category_property_B_1'],
['user_id','day','predict_category_property_C_1'],
['user_id','day','predict_category_property_A_2'],
['user_id','day','predict_category_property_B_2'],
['user_id','day','predict_category_property_C_2'],
        #-- with day and hour added --
['user_id','day','hour','item_id'],
['user_id','day','hour','context_id'],
['user_id','day','hour','shop_id'],
['user_id','day','hour','item_brand_id'],
['user_id','day','hour','item_city_id'],
['user_id','day','hour','context_page_id'],
['user_id','day','hour','item_category_2'],
['user_id','day','hour','item_property_list_1'],
['user_id','day','hour','item_property_list_2'],
['user_id','day','hour','item_property_list_3'],
['user_id','day','hour','predict_category_property_A'],
['user_id','day','hour','predict_category_property_B'],
['user_id','day','hour','predict_category_property_C'],
['user_id','day','hour','predict_category_property_A_1'],
['user_id','day','hour','predict_category_property_B_1'],
['user_id','day','hour','predict_category_property_C_1'],
['user_id','day','hour','predict_category_property_A_2'],
['user_id','day','hour','predict_category_property_B_2'],
['user_id','day','hour','predict_category_property_C_2'],
        #-- with hour added --
['user_id','hour','item_id'],
['user_id','hour','shop_id'],
['user_id','hour','item_brand_id'],
['user_id','hour','item_city_id'],
['user_id','hour','context_page_id'],
['user_id','hour','item_category_2'],
['user_id','hour','item_property_list_1'],
['user_id','hour','item_property_list_2'],
['user_id','hour','item_property_list_3'],
['user_id','hour','predict_category_property_A'],
['user_id','hour','predict_category_property_B'],
['user_id','hour','predict_category_property_C'],
['user_id','hour','predict_category_property_A_1'],
['user_id','hour','predict_category_property_B_1'],
['user_id','hour','predict_category_property_C_1'],
['user_id','hour','predict_category_property_A_2'],
['user_id','hour','predict_category_property_B_2'],
['user_id','hour','predict_category_property_C_2']
]
for c in columns_click:
if isinstance(c, (list, tuple)):
pair = [cc for cc in c]
c = '_'.join(c)
else:
pair = [c]
click_temp = data_temp.groupby(pair).size().reset_index().rename(columns={0: '{}_click'.format(c)})
data = pd.merge(data, click_temp, how = 'left',on=pair)
return data
def load_feat():
train_together_data = pd.read_csv("data_handle/train_together_data_after.csv")
train_together_data = train_together_data[train_together_data['context_timestamp'] > '2018-09-06 23:59:59']
val_data = train_together_data[train_together_data['context_timestamp'] > '2018-09-07 10:59:59']
train_data = train_together_data[train_together_data['context_timestamp'] <= '2018-09-07 10:59:59']
# train_data = pd.read_csv("data_handle/train_data.csv")
# val_data = pd.read_csv("data_handle/valid_data.csv")
test_data = pd.read_csv("data_handle/test_data_after.csv")
del train_together_data['context_timestamp']
del train_data['context_timestamp']
del val_data['context_timestamp']
del test_data['context_timestamp']
return train_together_data,train_data,val_data,test_data
if __name__ == '__main__':
pass
|
#!/usr/bin/env python3
# from: http://sam.aiki.info/b/google-images.py
# requires: selenium, chromium-driver, retry
#
# $ pip install webdriver_manager
# $ pip install selenium retry
# $ brew install chromedriver
# install google.com/chrome
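# example invocation (a sketch; prints one image URL per line to stdout):
#   python3 google-images.py --safe off "cute kittens" 50 > urls.txt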
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import selenium.common.exceptions as sel_ex
import sys
import time
import urllib.parse
from retry import retry
import argparse
import logging
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger()
retry_logger = None
css_thumbnail = "img.Q4LuWd"
css_large = "img.n3VNCb"
css_load_more = ".mye4qd"
selenium_exceptions = (sel_ex.ElementClickInterceptedException, sel_ex.ElementNotInteractableException, sel_ex.StaleElementReferenceException)
def scroll_to_end(wd):
wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
@retry(exceptions=KeyError, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def get_thumbnails(wd, want_more_than=0):
wd.execute_script("document.querySelector('{}').click();".format(css_load_more))
thumbnails = wd.find_elements_by_css_selector(css_thumbnail)
n_results = len(thumbnails)
if n_results <= want_more_than:
raise KeyError("no new thumbnails")
return thumbnails
@retry(exceptions=KeyError, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def get_image_src(wd):
actual_images = wd.find_elements_by_css_selector(css_large)
sources = []
for img in actual_images:
src = img.get_attribute("src")
if src.startswith("http") and not src.startswith("https://encrypted-tbn0.gstatic.com/"):
sources.append(src)
if not len(sources):
raise KeyError("no large image")
return sources
@retry(exceptions=selenium_exceptions, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def retry_click(el):
el.click()
def get_images(wd, start=0, n=20, out=None):
thumbnails = []
count = len(thumbnails)
while count < n:
scroll_to_end(wd)
try:
thumbnails = get_thumbnails(wd, want_more_than=count)
except KeyError as e:
logger.warning("cannot load enough thumbnails")
break
count = len(thumbnails)
sources = []
for tn in thumbnails:
try:
retry_click(tn)
except selenium_exceptions as e:
logger.warning("main image click failed")
continue
sources1 = []
try:
sources1 = get_image_src(wd)
except KeyError as e:
pass
# logger.warning("main image not found")
if not sources1:
tn_src = tn.get_attribute("src")
if not tn_src.startswith("data"):
logger.warning("no src found for main image, using thumbnail")
sources1 = [tn_src]
else:
logger.warning("no src found for main image, thumbnail is a data URL")
for src in sources1:
if not src in sources:
sources.append(src)
if out:
print(src, file=out)
out.flush()
if len(sources) >= n:
break
return sources
def google_image_search(wd, query, safe="off", n=20, opts='', out=None):
search_url_t = "https://www.google.com/search?safe={safe}&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img&tbs={opts}"
search_url = search_url_t.format(q=urllib.parse.quote(query), opts=urllib.parse.quote(opts), safe=safe)
wd.get(search_url)
sources = get_images(wd, n=n, out=out)
return sources
def main():
parser = argparse.ArgumentParser(description='Fetch image URLs from Google Image Search.')
parser.add_argument('--safe', type=str, default="off", help='safe search [off|active|images]')
parser.add_argument('--opts', type=str, default="", help='search options, e.g. isz:lt,islt:svga,itp:photo,ic:color,ift:jpg')
parser.add_argument('query', type=str, help='image search query')
parser.add_argument('n', type=int, default=20, help='number of images (approx)')
args = parser.parse_args()
opts = Options()
opts.add_argument("--headless")
# opts.add_argument("--blink-settings=imagesEnabled=false")
# with webdriver.Chrome(options=opts) as wd:
with webdriver.Chrome(ChromeDriverManager().install(), options=opts) as wd:
sources = google_image_search(wd, args.query, safe=args.safe, n=args.n, opts=args.opts, out=sys.stdout)
if __name__ == "__main__":
    main()
|
import json
from asynctest import mock as async_mock, TestCase as AsyncTestCase
from .....cache.base import BaseCache
from .....cache.in_memory import InMemoryCache
from .....connections.models.conn_record import ConnRecord
from .....connections.models.connection_target import ConnectionTarget
from .....connections.models.diddoc import (
DIDDoc,
PublicKey,
PublicKeyType,
Service,
)
from .....core.in_memory import InMemoryProfile
from .....ledger.base import BaseLedger
from .....ledger.error import LedgerError
from .....messaging.responder import BaseResponder, MockResponder
from .....messaging.decorators.attach_decorator import AttachDecorator
from .....multitenant.manager import MultitenantManager
from .....storage.error import StorageNotFoundError
from .....transport.inbound.receipt import MessageReceipt
from .....wallet.base import DIDInfo
from .....wallet.in_memory import InMemoryWallet
from .....wallet.util import naked_to_did_key
from .....connections.base_manager import (
BaseConnectionManager,
BaseConnectionManagerError,
)
from ....connections.v1_0.messages.connection_invitation import ConnectionInvitation
from ....coordinate_mediation.v1_0.manager import MediationManager
from ....coordinate_mediation.v1_0.messages.keylist_update import (
KeylistUpdate,
KeylistUpdateRule,
)
from ....coordinate_mediation.v1_0.models.mediation_record import (
MediationRecord,
)
from ....didcomm_prefix import DIDCommPrefix
from ....out_of_band.v1_0.manager import OutOfBandManager
from ....out_of_band.v1_0.messages.invitation import HSProto, InvitationMessage
from ....out_of_band.v1_0.messages.service import Service as OOBService
from .. import manager as test_module
from ..manager import DIDXManager, DIDXManagerError
class TestConfig:
test_seed = "testseed000000000000000000000001"
test_did = "55GkHamhTU1ZbTbV2ab9DE"
test_verkey = "3Dn1SJNPaCXcvvJvSbsFWP2xaCjMom3can8CQNhWrTRx"
test_endpoint = "http://localhost"
test_target_did = "GbuDUYXaUZRfHD2jeDuQuP"
test_target_verkey = "9WCgWKUaAJj3VWxxtzvvMQN3AoFxoBtBDo9ntwJnVVCC"
def make_did_doc(self, did, verkey):
doc = DIDDoc(did=did)
controller = did
ident = "1"
pk_value = verkey
pk = PublicKey(
did, ident, pk_value, PublicKeyType.ED25519_SIG_2018, controller, False
)
doc.set(pk)
recip_keys = [pk]
router_keys = []
service = Service(
did, "indy", "IndyAgent", recip_keys, router_keys, TestConfig.test_endpoint
)
doc.set(service)
return doc
class TestDidExchangeManager(AsyncTestCase, TestConfig):
async def setUp(self):
self.responder = MockResponder()
self.session = InMemoryProfile.test_session(
{
"default_endpoint": "http://aries.ca/endpoint",
"default_label": "This guy",
"additional_endpoints": ["http://aries.ca/another-endpoint"],
"debug.auto_accept_invites": True,
"debug.auto_accept_requests": True,
"multitenant.enabled": True,
"wallet.id": True,
},
bind={BaseResponder: self.responder, BaseCache: InMemoryCache()},
)
self.context = self.session.context
self.did_info = await self.session.wallet.create_local_did()
self.ledger = async_mock.create_autospec(BaseLedger)
self.ledger.__aenter__ = async_mock.CoroutineMock(return_value=self.ledger)
self.ledger.get_endpoint_for_did = async_mock.CoroutineMock(
return_value=TestConfig.test_endpoint
)
self.session.context.injector.bind_instance(BaseLedger, self.ledger)
self.multitenant_mgr = async_mock.MagicMock(MultitenantManager, autospec=True)
self.session.context.injector.bind_instance(
MultitenantManager, self.multitenant_mgr
)
self.manager = DIDXManager(self.session)
assert self.manager.session
self.oob_manager = OutOfBandManager(self.session)
self.test_mediator_routing_keys = [
naked_to_did_key("3Dn1SJNPaCXcvvJvSbsFWP2xaCjMom3can8CQNhWrTRR")
]
self.test_mediator_conn_id = "mediator-conn-id"
self.test_mediator_endpoint = "http://mediator.example.com"
async def test_verify_diddoc(self):
did_doc = self.make_did_doc(
TestConfig.test_target_did,
TestConfig.test_target_verkey,
)
did_doc_attach = AttachDecorator.from_indy_dict(did_doc.serialize())
with self.assertRaises(DIDXManagerError):
await self.manager.verify_diddoc(self.session.wallet, did_doc_attach)
await did_doc_attach.data.sign(self.did_info.verkey, self.session.wallet)
await self.manager.verify_diddoc(self.session.wallet, did_doc_attach)
did_doc_attach.data.base64_ = "YmFpdCBhbmQgc3dpdGNo"
with self.assertRaises(DIDXManagerError):
await self.manager.verify_diddoc(self.session.wallet, did_doc_attach)
async def test_receive_invitation(self):
self.session.context.update_settings({"public_invites": True})
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
with async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco, async_mock.patch.object(
self.multitenant_mgr, "get_default_mediator"
) as mock_get_default_mediator:
mock_get_default_mediator.return_value = mediation_record
invi_rec = await self.oob_manager.create_invitation(
my_endpoint="testendpoint",
hs_protos=[HSProto.RFC23],
)
invi_msg = InvitationMessage.deserialize(invi_rec.invitation)
mock_attach_deco.from_indy_dict = async_mock.MagicMock(
return_value=async_mock.MagicMock(
data=async_mock.MagicMock(sign=async_mock.CoroutineMock())
)
)
invitee_record = await self.manager.receive_invitation(invi_msg)
assert invitee_record.state == ConnRecord.State.REQUEST.rfc23
async def test_receive_invitation_no_auto_accept(self):
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
with async_mock.patch.object(
self.multitenant_mgr, "get_default_mediator"
) as mock_get_default_mediator:
mock_get_default_mediator.return_value = mediation_record
invi_rec = await self.oob_manager.create_invitation(
my_endpoint="testendpoint",
hs_protos=[HSProto.RFC23],
)
invitee_record = await self.manager.receive_invitation(
InvitationMessage.deserialize(invi_rec.invitation), auto_accept=False
)
assert invitee_record.state == ConnRecord.State.INVITATION.rfc23
async def test_receive_invitation_bad_invitation(self):
x_invites = [
InvitationMessage(),
InvitationMessage(service=[OOBService()]),
InvitationMessage(
service=[
OOBService(
recipient_keys=["3Dn1SJNPaCXcvvJvSbsFWP2xaCjMom3can8CQNhWrTRx"]
)
]
),
]
for x_invite in x_invites:
with self.assertRaises(DIDXManagerError):
await self.manager.receive_invitation(x_invite)
async def test_create_request_implicit(self):
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
with async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc, async_mock.patch.object(
self.multitenant_mgr, "get_default_mediator"
) as mock_get_default_mediator:
mock_get_default_mediator.return_value = mediation_record
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock(return_value={})
)
didx_req = await self.manager.create_request_implicit(
their_public_did=TestConfig.test_target_did,
my_label=None,
my_endpoint=None,
mediation_id=mediation_record._id,
)
assert didx_req._id
async def test_create_request(self):
mock_conn_rec = async_mock.MagicMock(
connection_id="dummy",
my_did=self.did_info.did,
their_did=TestConfig.test_target_did,
their_role=ConnRecord.Role.RESPONDER.rfc23,
state=ConnRecord.State.REQUEST.rfc23,
retrieve_invitation=async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
service_blocks=None,
service_dids=[TestConfig.test_target_did],
)
),
save=async_mock.CoroutineMock(),
)
with async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc:
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock(return_value={})
)
didx_req = await self.manager.create_request(mock_conn_rec)
assert didx_req
async def test_create_request_multitenant(self):
self.context.update_settings(
{"multitenant.enabled": True, "wallet.id": "test_wallet"}
)
with async_mock.patch.object(
InMemoryWallet, "create_local_did", autospec=True
) as mock_wallet_create_local_did, async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco:
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock(return_value={})
)
mock_wallet_create_local_did.return_value = DIDInfo(
TestConfig.test_did, TestConfig.test_verkey, None
)
mock_attach_deco.from_indy_dict = async_mock.MagicMock(
return_value=async_mock.MagicMock(
data=async_mock.MagicMock(sign=async_mock.CoroutineMock())
)
)
await self.manager.create_request(
async_mock.MagicMock(
invitation_key=TestConfig.test_verkey,
their_label="Hello",
their_role=ConnRecord.Role.RESPONDER.rfc160,
alias="Bob",
my_did=None,
retrieve_invitation=async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
service_blocks=None,
service_dids=[TestConfig.test_target_did],
)
),
save=async_mock.CoroutineMock(),
)
)
self.multitenant_mgr.add_key.assert_called_once_with(
"test_wallet", TestConfig.test_verkey
)
async def test_create_request_mediation_id(self):
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
invi = InvitationMessage(
comment="test",
handshake_protocols=[
pfx.qualify(HSProto.RFC23.name) for pfx in DIDCommPrefix
],
service_dids=[TestConfig.test_did],
)
record = ConnRecord(
invitation_key=TestConfig.test_verkey,
invitation_msg_id=invi._id,
their_label="Hello",
their_role=ConnRecord.Role.RESPONDER.rfc160,
alias="Bob",
my_did=None,
)
await record.save(self.session)
await record.attach_invitation(self.session, invi)
with async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc:
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock(return_value={})
)
didx_req = await self.manager.create_request(
record,
my_endpoint="http://testendpoint.com/endpoint",
mediation_id=mediation_record._id,
)
assert didx_req
assert len(self.responder.messages) == 1
message, used_kwargs = self.responder.messages[0]
assert isinstance(message, KeylistUpdate)
assert (
"connection_id" in used_kwargs
and used_kwargs["connection_id"] == self.test_mediator_conn_id
)
async def test_create_request_my_endpoint(self):
mock_conn_rec = async_mock.MagicMock(
connection_id="dummy",
my_did=None,
their_did=TestConfig.test_target_did,
their_role=ConnRecord.Role.RESPONDER.rfc23,
their_label="Bob",
invitation_key=TestConfig.test_verkey,
state=ConnRecord.State.REQUEST.rfc23,
alias="Bob",
retrieve_invitation=async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
service_blocks=None,
service_dids=[TestConfig.test_target_did],
)
),
save=async_mock.CoroutineMock(),
)
with async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc:
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock(return_value={})
)
didx_req = await self.manager.create_request(
mock_conn_rec,
my_endpoint="http://testendpoint.com/endpoint",
)
assert didx_req
async def test_receive_request_explicit_public_did(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="did:sov:publicdid0000000000000"),
)
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
STATE_REQUEST = ConnRecord.State.REQUEST
self.session.context.update_settings({"public_invites": True})
ACCEPT_AUTO = ConnRecord.ACCEPT_AUTO
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls, async_mock.patch.object(
test_module, "DIDDoc", autospec=True
) as mock_did_doc, async_mock.patch.object(
test_module, "DIDPosture", autospec=True
) as mock_did_posture, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco, async_mock.patch.object(
test_module, "DIDXResponse", autospec=True
) as mock_response, async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc, async_mock.patch.object(
MediationManager, "prepare_request", autospec=True
) as mock_mediation_mgr_prep_req:
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock(return_value={})
)
mock_mediation_mgr_prep_req.return_value = (mediation_record, mock_request)
mock_conn_record = async_mock.MagicMock(
accept=ACCEPT_AUTO,
my_did=None,
state=STATE_REQUEST.rfc23,
attach_request=async_mock.CoroutineMock(),
retrieve_request=async_mock.CoroutineMock(),
metadata_get_all=async_mock.CoroutineMock(return_value={}),
metadata_get=async_mock.CoroutineMock(return_value=True),
save=async_mock.CoroutineMock(),
)
mock_conn_rec_cls.ACCEPT_AUTO = ConnRecord.ACCEPT_AUTO
mock_conn_rec_cls.State.REQUEST = STATE_REQUEST
mock_conn_rec_cls.State.get = async_mock.MagicMock(
return_value=STATE_REQUEST
)
mock_conn_rec_cls.retrieve_by_id = async_mock.CoroutineMock(
return_value=async_mock.MagicMock(save=async_mock.CoroutineMock())
)
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
return_value=mock_conn_record
)
mock_conn_rec_cls.return_value = mock_conn_record
mock_did_posture.get = async_mock.MagicMock(
return_value=test_module.DIDPosture.PUBLIC
)
mock_did_doc.from_json = async_mock.MagicMock(
return_value=async_mock.MagicMock(did=TestConfig.test_did)
)
mock_attach_deco.from_indy_dict = async_mock.MagicMock(
return_value=async_mock.MagicMock(
data=async_mock.MagicMock(sign=async_mock.CoroutineMock())
)
)
mock_response.return_value = async_mock.MagicMock(
assign_thread_from=async_mock.MagicMock(),
assign_trace_from=async_mock.MagicMock(),
)
conn_rec = await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=None,
my_endpoint=None,
alias=None,
auto_accept_implicit=None,
mediation_id=None,
)
assert conn_rec
messages = self.responder.messages
assert len(messages) == 2
(result, target) = messages[0]
assert "connection_id" in target
async def test_receive_request_invi_not_found(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=None,
_thread=async_mock.MagicMock(pthid="explicit-not-a-did"),
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls:
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
side_effect=StorageNotFoundError()
)
with self.assertRaises(DIDXManagerError) as context:
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=TestConfig.test_verkey,
my_endpoint=None,
alias=None,
auto_accept_implicit=None,
mediation_id=None,
)
assert "No explicit invitation found" in str(context.exception)
async def test_receive_request_with_mediator_without_multi_use_multitenant(self):
multiuse_info = await self.session.wallet.create_local_did()
did_doc_dict = self.make_did_doc(
did=TestConfig.test_target_did,
verkey=TestConfig.test_target_verkey,
).serialize()
del did_doc_dict["authentication"]
del did_doc_dict["service"]
new_info = await self.session.wallet.create_local_did()
mock_request = async_mock.MagicMock()
mock_request.connection = async_mock.MagicMock(
is_multiuse_invitation=False, invitation_key=multiuse_info.verkey
)
mock_request.connection.did = TestConfig.test_did
mock_request.connection.did_doc = async_mock.MagicMock()
mock_request.connection.did_doc.did = TestConfig.test_did
        mock_request.did = TestConfig.test_target_did
mock_request.did_doc_attach = async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value=json.dumps(did_doc_dict))
),
)
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
record = ConnRecord(
invitation_key=TestConfig.test_verkey,
their_label="Hello",
their_role=ConnRecord.Role.RESPONDER.rfc160,
alias="Bob",
)
record.accept = ConnRecord.ACCEPT_MANUAL
await record.save(self.session)
with async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_rec_save, async_mock.patch.object(
ConnRecord, "attach_request", autospec=True
) as mock_conn_attach_request, async_mock.patch.object(
ConnRecord, "retrieve_by_invitation_key"
) as mock_conn_retrieve_by_invitation_key, async_mock.patch.object(
ConnRecord, "retrieve_request", autospec=True
):
mock_conn_retrieve_by_invitation_key.return_value = record
conn_rec = await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=TestConfig.test_verkey,
my_endpoint=None,
alias=None,
auto_accept_implicit=None,
mediation_id=mediation_record.mediation_id,
)
assert len(self.responder.messages) == 1
message, target = self.responder.messages[0]
assert isinstance(message, KeylistUpdate)
assert len(message.updates) == 1
(remove,) = message.updates
assert remove.action == KeylistUpdateRule.RULE_REMOVE
assert remove.recipient_key == record.invitation_key
async def test_receive_request_with_mediator_without_multi_use_multitenant_mismatch(
self,
):
multiuse_info = await self.session.wallet.create_local_did()
did_doc_dict = self.make_did_doc(
did=TestConfig.test_target_did,
verkey=TestConfig.test_target_verkey,
).serialize()
del did_doc_dict["authentication"]
del did_doc_dict["service"]
new_info = await self.session.wallet.create_local_did()
mock_request = async_mock.MagicMock()
mock_request.connection = async_mock.MagicMock(
is_multiuse_invitation=False, invitation_key=multiuse_info.verkey
)
mock_request.connection.did = TestConfig.test_did
mock_request.connection.did_doc = async_mock.MagicMock()
mock_request.connection.did_doc.did = TestConfig.test_did
mock_request.did_doc_attach = async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value=json.dumps(did_doc_dict))
),
)
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
record = ConnRecord(
invitation_key=TestConfig.test_verkey,
their_label="Hello",
their_role=ConnRecord.Role.RESPONDER.rfc160,
alias="Bob",
)
record.accept = ConnRecord.ACCEPT_MANUAL
await record.save(self.session)
with async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_rec_save, async_mock.patch.object(
ConnRecord, "attach_request", autospec=True
) as mock_conn_attach_request, async_mock.patch.object(
ConnRecord, "retrieve_by_invitation_key"
) as mock_conn_retrieve_by_invitation_key, async_mock.patch.object(
ConnRecord, "retrieve_request", autospec=True
):
mock_conn_retrieve_by_invitation_key.return_value = record
with self.assertRaises(DIDXManagerError) as context:
conn_rec = await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=TestConfig.test_verkey,
my_endpoint=None,
alias=None,
auto_accept_implicit=None,
mediation_id=mediation_record.mediation_id,
)
assert "does not match" in str(context.exception)
async def test_receive_request_public_did_no_did_doc_attachment(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=None,
_thread=async_mock.MagicMock(pthid="did:sov:publicdid0000000000000"),
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
self.session.context.update_settings({"public_invites": True})
mock_conn_rec_state_request = ConnRecord.State.REQUEST
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls, async_mock.patch.object(
test_module, "DIDPosture", autospec=True
) as mock_did_posture:
mock_conn_record = async_mock.MagicMock(
accept=ConnRecord.ACCEPT_MANUAL,
my_did=None,
state=mock_conn_rec_state_request.rfc23,
attach_request=async_mock.CoroutineMock(),
retrieve_request=async_mock.CoroutineMock(),
metadata_get_all=async_mock.CoroutineMock(return_value={}),
save=async_mock.CoroutineMock(),
)
mock_conn_rec_cls.return_value = mock_conn_record
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
return_value=mock_conn_record
)
mock_did_posture.get = async_mock.MagicMock(
return_value=test_module.DIDPosture.PUBLIC
)
with self.assertRaises(DIDXManagerError) as context:
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=None,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=None,
mediation_id=None,
)
assert "DID Doc attachment missing or has no data" in str(context.exception)
async def test_receive_request_public_did_x_not_public(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="did:sov:publicdid0000000000000"),
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
self.session.context.update_settings({"public_invites": True})
mock_conn_rec_state_request = ConnRecord.State.REQUEST
with async_mock.patch.object(
test_module, "DIDPosture", autospec=True
) as mock_did_posture:
mock_did_posture.get = async_mock.MagicMock(
return_value=test_module.DIDPosture.WALLET_ONLY
)
with self.assertRaises(DIDXManagerError) as context:
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=None,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
assert "is not public" in str(context.exception)
async def test_receive_request_public_did_x_wrong_did(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="did:sov:publicdid0000000000000"),
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
self.session.context.update_settings({"public_invites": True})
mock_conn_rec_state_request = ConnRecord.State.REQUEST
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls, async_mock.patch.object(
test_module, "DIDPosture", autospec=True
) as mock_did_posture, async_mock.patch.object(
test_module.DIDDoc, "from_json", async_mock.MagicMock()
) as mock_did_doc_from_json:
mock_conn_record = async_mock.MagicMock(
accept=ConnRecord.ACCEPT_MANUAL,
my_did=None,
state=mock_conn_rec_state_request.rfc23,
attach_request=async_mock.CoroutineMock(),
retrieve_request=async_mock.CoroutineMock(),
metadata_get_all=async_mock.CoroutineMock(return_value={}),
save=async_mock.CoroutineMock(),
)
mock_conn_rec_cls.return_value = mock_conn_record
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
return_value=mock_conn_record
)
mock_did_doc_from_json.return_value = async_mock.MagicMock(did="wrong-did")
mock_did_posture.get = async_mock.MagicMock(
return_value=test_module.DIDPosture.PUBLIC
)
with self.assertRaises(DIDXManagerError) as context:
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=None,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
assert "does not match" in str(context.exception)
async def test_receive_request_public_did_x_did_doc_attach_bad_sig(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=False)
)
),
_thread=async_mock.MagicMock(pthid="did:sov:publicdid0000000000000"),
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
self.session.context.update_settings({"public_invites": True})
mock_conn_rec_state_request = ConnRecord.State.REQUEST
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls, async_mock.patch.object(
test_module, "DIDPosture", autospec=True
) as mock_did_posture:
mock_conn_record = async_mock.MagicMock(
accept=ConnRecord.ACCEPT_MANUAL,
my_did=None,
state=mock_conn_rec_state_request.rfc23,
attach_request=async_mock.CoroutineMock(),
retrieve_request=async_mock.CoroutineMock(),
metadata_get_all=async_mock.CoroutineMock(return_value={}),
save=async_mock.CoroutineMock(),
)
mock_conn_rec_cls.return_value = mock_conn_record
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
return_value=mock_conn_record
)
mock_did_posture.get = async_mock.MagicMock(
return_value=test_module.DIDPosture.PUBLIC
)
with self.assertRaises(DIDXManagerError) as context:
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=None,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
assert "DID Doc signature failed" in str(context.exception)
async def test_receive_request_public_did_no_public_invites(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="did:sov:publicdid0000000000000"),
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
self.session.context.update_settings({"public_invites": False})
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco, async_mock.patch.object(
test_module, "DIDXResponse", autospec=True
) as mock_response, async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc, async_mock.patch.object(
test_module.DIDDoc, "from_json", async_mock.MagicMock()
) as mock_did_doc_from_json:
mock_did_doc_from_json.return_value = async_mock.MagicMock(
did=TestConfig.test_did
)
with self.assertRaises(DIDXManagerError) as context:
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=None,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
assert "Public invitations are not enabled" in str(context.exception)
async def test_receive_request_public_did_no_auto_accept(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="did:sov:publicdid0000000000000"),
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
self.session.context.update_settings(
{"public_invites": True, "debug.auto_accept_requests": False}
)
mock_conn_rec_state_request = ConnRecord.State.REQUEST
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls, async_mock.patch.object(
test_module, "DIDDoc", autospec=True
) as mock_did_doc, async_mock.patch.object(
test_module, "DIDPosture", autospec=True
) as mock_did_posture, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco, async_mock.patch.object(
test_module, "DIDXResponse", autospec=True
) as mock_response, async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc:
mock_conn_record = async_mock.MagicMock(
accept=ConnRecord.ACCEPT_MANUAL,
my_did=None,
state=mock_conn_rec_state_request.rfc23,
attach_request=async_mock.CoroutineMock(),
retrieve_request=async_mock.CoroutineMock(),
metadata_get_all=async_mock.CoroutineMock(return_value={}),
save=async_mock.CoroutineMock(),
)
mock_conn_rec_cls.return_value = mock_conn_record
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
return_value=mock_conn_record
)
mock_did_posture.get = async_mock.MagicMock(
return_value=test_module.DIDPosture.PUBLIC
)
mock_did_doc.from_json = async_mock.MagicMock(
return_value=async_mock.MagicMock(did=TestConfig.test_did)
)
conn_rec = await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=None,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
assert conn_rec
messages = self.responder.messages
assert not messages
async def test_receive_request_peer_did(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="dummy-pthid"),
)
mock_conn = async_mock.MagicMock(
my_did=TestConfig.test_did,
their_did=TestConfig.test_target_did,
invitation_key=TestConfig.test_verkey,
connection_id="dummy",
is_multiuse_invitation=True,
state=ConnRecord.State.INVITATION.rfc23,
their_role=ConnRecord.Role.REQUESTER.rfc23,
save=async_mock.CoroutineMock(),
attach_request=async_mock.CoroutineMock(),
accept=ConnRecord.ACCEPT_MANUAL,
metadata_get_all=async_mock.CoroutineMock(return_value={"test": "value"}),
)
mock_conn_rec_state_request = ConnRecord.State.REQUEST
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
self.session.context.update_settings({"public_invites": True})
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls, async_mock.patch.object(
test_module, "DIDDoc", autospec=True
) as mock_did_doc, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco, async_mock.patch.object(
test_module, "DIDXResponse", autospec=True
) as mock_response:
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
return_value=mock_conn
)
mock_conn_rec_cls.return_value = async_mock.MagicMock(
accept=ConnRecord.ACCEPT_AUTO,
my_did=None,
state=mock_conn_rec_state_request.rfc23,
attach_request=async_mock.CoroutineMock(),
retrieve_request=async_mock.CoroutineMock(),
save=async_mock.CoroutineMock(),
metadata_set=async_mock.CoroutineMock(),
)
mock_did_doc.from_json = async_mock.MagicMock(
return_value=async_mock.MagicMock(did=TestConfig.test_did)
)
mock_attach_deco.from_indy_dict = async_mock.MagicMock(
return_value=async_mock.MagicMock(
data=async_mock.MagicMock(sign=async_mock.CoroutineMock())
)
)
mock_response.return_value = async_mock.MagicMock(
assign_thread_from=async_mock.MagicMock(),
assign_trace_from=async_mock.MagicMock(),
)
conn_rec = await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=TestConfig.test_verkey,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
assert conn_rec
mock_conn_rec_cls.return_value.metadata_set.assert_called()
assert not self.responder.messages
async def test_receive_request_multiuse_multitenant(self):
multiuse_info = await self.session.wallet.create_local_did()
new_info = await self.session.wallet.create_local_did()
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="dummy-pthid"),
)
self.context.update_settings(
{"wallet.id": "test_wallet", "multitenant.enabled": True}
)
ACCEPT_MANUAL = ConnRecord.ACCEPT_MANUAL
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec_cls, async_mock.patch.object(
InMemoryWallet, "create_local_did", autospec=True
) as mock_wallet_create_local_did, async_mock.patch.object(
test_module, "DIDDoc", autospec=True
) as mock_did_doc:
mock_conn_rec = async_mock.CoroutineMock(
connection_id="dummy",
accept=ACCEPT_MANUAL,
is_multiuse_invitation=True,
attach_request=async_mock.CoroutineMock(),
save=async_mock.CoroutineMock(),
retrieve_invitation=async_mock.CoroutineMock(return_value={}),
metadata_get_all=async_mock.CoroutineMock(return_value={}),
retrieve_request=async_mock.CoroutineMock(),
)
mock_conn_rec_cls.return_value = mock_conn_rec
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
return_value=mock_conn_rec
)
mock_wallet_create_local_did.return_value = DIDInfo(
new_info.did, new_info.verkey, None
)
mock_did_doc.from_json = async_mock.MagicMock(
return_value=async_mock.MagicMock(did=TestConfig.test_did)
)
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=TestConfig.test_verkey,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
self.multitenant_mgr.add_key.assert_called_once_with(
"test_wallet", new_info.verkey
)
async def test_receive_request_implicit_multitenant(self):
new_info = await self.session.wallet.create_local_did()
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="did:sov:publicdid0000000000000"),
)
self.context.update_settings(
{
"wallet.id": "test_wallet",
"multitenant.enabled": True,
"public_invites": True,
"debug.auto_accept_requests": False,
}
)
ACCEPT_MANUAL = ConnRecord.ACCEPT_MANUAL
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec_cls, async_mock.patch.object(
InMemoryWallet, "create_local_did", autospec=True
) as mock_wallet_create_local_did, async_mock.patch.object(
InMemoryWallet, "get_local_did", autospec=True
) as mock_wallet_get_local_did, async_mock.patch.object(
test_module, "DIDPosture", autospec=True
) as mock_did_posture, async_mock.patch.object(
test_module, "DIDDoc", autospec=True
) as mock_did_doc:
mock_conn_rec = async_mock.CoroutineMock(
connection_id="dummy",
accept=ACCEPT_MANUAL,
is_multiuse_invitation=False,
attach_request=async_mock.CoroutineMock(),
save=async_mock.CoroutineMock(),
retrieve_invitation=async_mock.CoroutineMock(return_value={}),
metadata_get_all=async_mock.CoroutineMock(return_value={}),
retrieve_request=async_mock.CoroutineMock(),
)
mock_conn_rec_cls.return_value = mock_conn_rec
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
side_effect=StorageNotFoundError()
)
mock_did_posture.get = async_mock.MagicMock(
return_value=test_module.DIDPosture.PUBLIC
)
mock_wallet_create_local_did.return_value = DIDInfo(
new_info.did, new_info.verkey, None
)
mock_did_doc.from_json = async_mock.MagicMock(
return_value=async_mock.MagicMock(did=TestConfig.test_did)
)
mock_wallet_get_local_did.return_value = DIDInfo(
TestConfig.test_did, TestConfig.test_verkey, None
)
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=None,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
self.multitenant_mgr.add_key.assert_called_once_with(
"test_wallet", new_info.verkey
)
async def test_receive_request_peer_did_not_found_x(self):
mock_request = async_mock.MagicMock(
did=TestConfig.test_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(return_value="dummy-did-doc")
),
)
),
_thread=async_mock.MagicMock(pthid="dummy-pthid"),
)
await self.session.wallet.create_local_did(seed=None, did=TestConfig.test_did)
with async_mock.patch.object(
test_module, "ConnRecord", async_mock.MagicMock()
) as mock_conn_rec_cls:
mock_conn_rec_cls.retrieve_by_invitation_key = async_mock.CoroutineMock(
side_effect=StorageNotFoundError()
)
with self.assertRaises(DIDXManagerError):
await self.manager.receive_request(
request=mock_request,
recipient_did=TestConfig.test_did,
recipient_verkey=TestConfig.test_verkey,
my_endpoint=TestConfig.test_endpoint,
alias="Alias",
auto_accept_implicit=False,
mediation_id=None,
)
async def test_create_response(self):
conn_rec = ConnRecord(
connection_id="dummy", state=ConnRecord.State.REQUEST.rfc23
)
with async_mock.patch.object(
test_module.ConnRecord, "retrieve_request", async_mock.CoroutineMock()
) as mock_retrieve_req, async_mock.patch.object(
conn_rec, "save", async_mock.CoroutineMock()
) as mock_save, async_mock.patch.object(
test_module, "DIDDoc", autospec=True
) as mock_did_doc, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco, async_mock.patch.object(
test_module, "DIDXResponse", autospec=True
) as mock_response, async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc:
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock()
)
mock_attach_deco.from_indy_dict = async_mock.MagicMock(
return_value=async_mock.MagicMock(
data=async_mock.MagicMock(sign=async_mock.CoroutineMock())
)
)
await self.manager.create_response(conn_rec, "http://10.20.30.40:5060/")
async def test_create_response_mediation_id(self):
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
invi = InvitationMessage(
comment="test",
handshake_protocols=[
pfx.qualify(HSProto.RFC23.name) for pfx in DIDCommPrefix
],
service_dids=[TestConfig.test_did],
)
record = ConnRecord(
invitation_key=TestConfig.test_verkey,
invitation_msg_id=invi._id,
their_label="Hello",
their_role=ConnRecord.Role.RESPONDER.rfc160,
alias="Bob",
my_did=None,
state=ConnRecord.State.REQUEST.rfc23,
)
await record.save(self.session)
await record.attach_invitation(self.session, invi)
with async_mock.patch.object(
ConnRecord, "log_state", autospec=True
) as mock_conn_log_state, async_mock.patch.object(
ConnRecord, "retrieve_request", autospec=True
) as mock_conn_retrieve_request, async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_save, async_mock.patch.object(
record, "metadata_get", async_mock.CoroutineMock(return_value=False)
), async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco:
mock_attach_deco.from_indy_dict = async_mock.MagicMock(
return_value=async_mock.MagicMock(
data=async_mock.MagicMock(sign=async_mock.CoroutineMock())
)
)
await self.manager.create_response(
record, mediation_id=mediation_record.mediation_id
)
assert len(self.responder.messages) == 1
message, target = self.responder.messages[0]
assert isinstance(message, KeylistUpdate)
assert len(message.updates) == 1
(add,) = message.updates
assert add.action == KeylistUpdateRule.RULE_ADD
assert add.recipient_key
async def test_create_response_mediation_id_invalid_conn_state(self):
mediation_record = MediationRecord(
role=MediationRecord.ROLE_CLIENT,
state=MediationRecord.STATE_GRANTED,
connection_id=self.test_mediator_conn_id,
routing_keys=self.test_mediator_routing_keys,
endpoint=self.test_mediator_endpoint,
)
await mediation_record.save(self.session)
invi = InvitationMessage(
comment="test",
handshake_protocols=[
pfx.qualify(HSProto.RFC23.name) for pfx in DIDCommPrefix
],
service_dids=[TestConfig.test_did],
)
record = ConnRecord(
invitation_key=TestConfig.test_verkey,
invitation_msg_id=invi._id,
their_label="Hello",
their_role=ConnRecord.Role.RESPONDER.rfc160,
alias="Bob",
my_did=None,
)
await record.save(self.session)
await record.attach_invitation(self.session, invi)
with async_mock.patch.object(
ConnRecord, "log_state", autospec=True
) as mock_conn_log_state, async_mock.patch.object(
ConnRecord, "retrieve_request", autospec=True
) as mock_conn_retrieve_request, async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_save, async_mock.patch.object(
record, "metadata_get", async_mock.CoroutineMock(return_value=False)
):
with self.assertRaises(DIDXManagerError) as context:
await self.manager.create_response(
record, mediation_id=mediation_record.mediation_id
)
assert "Connection not in state" in str(context.exception)
async def test_create_response_multitenant(self):
conn_rec = ConnRecord(
connection_id="dummy", state=ConnRecord.State.REQUEST.rfc23
)
self.manager.session.context.update_settings(
{
"multitenant.enabled": True,
"wallet.id": "test_wallet",
}
)
with async_mock.patch.object(
test_module.ConnRecord, "retrieve_request"
), async_mock.patch.object(
conn_rec, "save", async_mock.CoroutineMock()
), async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco, async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc, async_mock.patch.object(
InMemoryWallet, "create_local_did", autospec=True
) as mock_wallet_create_local_did:
mock_wallet_create_local_did.return_value = DIDInfo(
TestConfig.test_did, TestConfig.test_verkey, None
)
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock()
)
mock_attach_deco.from_indy_dict = async_mock.MagicMock(
return_value=async_mock.MagicMock(
data=async_mock.MagicMock(sign=async_mock.CoroutineMock())
)
)
await self.manager.create_response(conn_rec)
self.multitenant_mgr.add_key.assert_called_once_with(
"test_wallet", TestConfig.test_verkey
)
async def test_create_response_conn_rec_my_did(self):
conn_rec = ConnRecord(
connection_id="dummy",
my_did=TestConfig.test_did,
state=ConnRecord.State.REQUEST.rfc23,
)
with async_mock.patch.object(
test_module.ConnRecord, "retrieve_request", async_mock.CoroutineMock()
) as mock_retrieve_req, async_mock.patch.object(
conn_rec, "save", async_mock.CoroutineMock()
) as mock_save, async_mock.patch.object(
test_module, "DIDDoc", autospec=True
) as mock_did_doc, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_deco, async_mock.patch.object(
test_module, "DIDXResponse", autospec=True
) as mock_response, async_mock.patch.object(
self.manager, "create_did_document", async_mock.CoroutineMock()
) as mock_create_did_doc, async_mock.patch.object(
InMemoryWallet, "get_local_did", async_mock.CoroutineMock()
) as mock_get_loc_did:
mock_get_loc_did.return_value = self.did_info
mock_create_did_doc.return_value = async_mock.MagicMock(
serialize=async_mock.MagicMock()
)
mock_attach_deco.from_indy_dict = async_mock.MagicMock(
return_value=async_mock.MagicMock(
data=async_mock.MagicMock(sign=async_mock.CoroutineMock())
)
)
await self.manager.create_response(conn_rec, "http://10.20.30.40:5060/")
async def test_create_response_bad_state(self):
with self.assertRaises(DIDXManagerError):
await self.manager.create_response(
ConnRecord(
invitation_key=TestConfig.test_verkey,
their_label="Hello",
their_role=ConnRecord.Role.REQUESTER.rfc23,
state=ConnRecord.State.ABANDONED.rfc23,
alias="Bob",
)
)
async def test_accept_response_find_by_thread_id(self):
mock_response = async_mock.MagicMock()
mock_response._thread = async_mock.MagicMock()
mock_response.did = TestConfig.test_target_did
mock_response.did_doc_attach = async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
)
receipt = MessageReceipt(
recipient_did=TestConfig.test_did,
recipient_did_public=True,
)
with async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_rec_save, async_mock.patch.object(
ConnRecord, "retrieve_by_request_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_req_id, async_mock.patch.object(
ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_id, async_mock.patch.object(
DIDDoc, "deserialize", async_mock.MagicMock()
) as mock_did_doc_deser:
mock_did_doc_deser.return_value = async_mock.MagicMock(
did=TestConfig.test_target_did
)
mock_conn_retrieve_by_req_id.return_value = async_mock.MagicMock(
did=TestConfig.test_target_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
),
state=ConnRecord.State.REQUEST.rfc23,
save=async_mock.CoroutineMock(),
metadata_get=async_mock.CoroutineMock(),
connection_id="test-conn-id",
)
mock_conn_retrieve_by_id.return_value = async_mock.MagicMock(
their_did=TestConfig.test_target_did,
save=async_mock.CoroutineMock(),
)
conn_rec = await self.manager.accept_response(mock_response, receipt)
assert conn_rec.their_did == TestConfig.test_target_did
assert ConnRecord.State.get(conn_rec.state) is ConnRecord.State.COMPLETED
async def test_accept_response_not_found_by_thread_id_receipt_has_sender_did(self):
mock_response = async_mock.MagicMock()
mock_response._thread = async_mock.MagicMock()
mock_response.did = TestConfig.test_target_did
mock_response.did_doc_attach = async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
)
receipt = MessageReceipt(sender_did=TestConfig.test_target_did)
with async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_rec_save, async_mock.patch.object(
ConnRecord, "retrieve_by_request_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_req_id, async_mock.patch.object(
ConnRecord, "retrieve_by_did", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_did, async_mock.patch.object(
DIDDoc, "deserialize", async_mock.MagicMock()
) as mock_did_doc_deser:
mock_did_doc_deser.return_value = async_mock.MagicMock(
did=TestConfig.test_target_did
)
mock_conn_retrieve_by_req_id.side_effect = StorageNotFoundError()
mock_conn_retrieve_by_did.return_value = async_mock.MagicMock(
did=TestConfig.test_target_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
),
state=ConnRecord.State.REQUEST.rfc23,
save=async_mock.CoroutineMock(),
metadata_get=async_mock.CoroutineMock(return_value=False),
connection_id="test-conn-id",
)
conn_rec = await self.manager.accept_response(mock_response, receipt)
assert conn_rec.their_did == TestConfig.test_target_did
assert ConnRecord.State.get(conn_rec.state) is ConnRecord.State.COMPLETED
async def test_accept_response_not_found_by_thread_id_nor_receipt_sender_did(self):
mock_response = async_mock.MagicMock()
mock_response._thread = async_mock.MagicMock()
mock_response.did = TestConfig.test_target_did
mock_response.did_doc_attach = async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
)
receipt = MessageReceipt(sender_did=TestConfig.test_target_did)
with async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_rec_save, async_mock.patch.object(
ConnRecord, "retrieve_by_request_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_req_id, async_mock.patch.object(
ConnRecord, "retrieve_by_did", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_did:
mock_conn_retrieve_by_req_id.side_effect = StorageNotFoundError()
mock_conn_retrieve_by_did.side_effect = StorageNotFoundError()
with self.assertRaises(DIDXManagerError):
await self.manager.accept_response(mock_response, receipt)
async def test_accept_response_find_by_thread_id_bad_state(self):
mock_response = async_mock.MagicMock()
mock_response._thread = async_mock.MagicMock()
mock_response.did = TestConfig.test_target_did
mock_response.did_doc_attach = async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
)
receipt = MessageReceipt(sender_did=TestConfig.test_target_did)
with async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_rec_save, async_mock.patch.object(
ConnRecord, "retrieve_by_request_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_req_id:
mock_conn_retrieve_by_req_id.return_value = async_mock.MagicMock(
state=ConnRecord.State.ABANDONED.rfc23
)
with self.assertRaises(DIDXManagerError):
await self.manager.accept_response(mock_response, receipt)
async def test_accept_response_find_by_thread_id_no_did_doc_attached(self):
mock_response = async_mock.MagicMock()
mock_response._thread = async_mock.MagicMock()
mock_response.did = TestConfig.test_target_did
mock_response.did_doc_attach = None
receipt = MessageReceipt(sender_did=TestConfig.test_target_did)
with async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_rec_save, async_mock.patch.object(
ConnRecord, "retrieve_by_request_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_req_id:
mock_conn_retrieve_by_req_id.return_value = async_mock.MagicMock(
did=TestConfig.test_target_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
),
state=ConnRecord.State.REQUEST.rfc23,
save=async_mock.CoroutineMock(),
)
with self.assertRaises(DIDXManagerError):
await self.manager.accept_response(mock_response, receipt)
async def test_accept_response_find_by_thread_id_did_mismatch(self):
mock_response = async_mock.MagicMock()
mock_response._thread = async_mock.MagicMock()
mock_response.did = TestConfig.test_target_did
mock_response.did_doc_attach = async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
)
receipt = MessageReceipt(sender_did=TestConfig.test_target_did)
with async_mock.patch.object(
ConnRecord, "save", autospec=True
) as mock_conn_rec_save, async_mock.patch.object(
ConnRecord, "retrieve_by_request_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_req_id, async_mock.patch.object(
ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_id, async_mock.patch.object(
DIDDoc, "deserialize", async_mock.MagicMock()
) as mock_did_doc_deser:
mock_did_doc_deser.return_value = async_mock.MagicMock(
did=TestConfig.test_did
)
mock_conn_retrieve_by_req_id.return_value = async_mock.MagicMock(
did=TestConfig.test_target_did,
did_doc_attach=async_mock.MagicMock(
data=async_mock.MagicMock(
verify=async_mock.CoroutineMock(return_value=True),
signed=async_mock.MagicMock(
decode=async_mock.MagicMock(
return_value=json.dumps({"dummy": "did-doc"})
)
),
)
),
state=ConnRecord.State.REQUEST.rfc23,
save=async_mock.CoroutineMock(),
)
mock_conn_retrieve_by_id.return_value = async_mock.MagicMock(
their_did=TestConfig.test_target_did,
save=async_mock.CoroutineMock(),
)
with self.assertRaises(DIDXManagerError):
await self.manager.accept_response(mock_response, receipt)
async def test_accept_complete(self):
mock_complete = async_mock.MagicMock()
receipt = MessageReceipt(sender_did=TestConfig.test_target_did)
with async_mock.patch.object(
ConnRecord, "retrieve_by_request_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_req_id:
mock_conn_retrieve_by_req_id.return_value.save = async_mock.CoroutineMock()
conn_rec = await self.manager.accept_complete(mock_complete, receipt)
assert ConnRecord.State.get(conn_rec.state) is ConnRecord.State.COMPLETED
async def test_accept_complete_x_not_found(self):
mock_complete = async_mock.MagicMock()
receipt = MessageReceipt(sender_did=TestConfig.test_target_did)
with async_mock.patch.object(
ConnRecord, "retrieve_by_request_id", async_mock.CoroutineMock()
) as mock_conn_retrieve_by_req_id:
mock_conn_retrieve_by_req_id.side_effect = StorageNotFoundError()
with self.assertRaises(DIDXManagerError):
await self.manager.accept_complete(mock_complete, receipt)
async def test_create_did_document(self):
did_info = DIDInfo(
TestConfig.test_did,
TestConfig.test_verkey,
None,
)
mock_conn = async_mock.MagicMock(
connection_id="dummy",
inbound_connection_id=None,
their_did=TestConfig.test_target_did,
state=ConnRecord.State.COMPLETED.rfc23,
)
did_doc = self.make_did_doc(
did=TestConfig.test_target_did,
verkey=TestConfig.test_target_verkey,
)
for i in range(2): # first cover store-record, then update-value
await self.manager.store_did_document(did_doc)
with async_mock.patch.object(
ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id:
mock_conn_rec_retrieve_by_id.return_value = mock_conn
did_doc = await self.manager.create_did_document(
did_info=did_info,
inbound_connection_id="dummy",
svc_endpoints=[TestConfig.test_endpoint],
)
async def test_create_did_document_not_completed(self):
did_info = DIDInfo(
TestConfig.test_did,
TestConfig.test_verkey,
None,
)
mock_conn = async_mock.MagicMock(
connection_id="dummy",
inbound_connection_id=None,
their_did=TestConfig.test_target_did,
state=ConnRecord.State.ABANDONED.rfc23,
)
with async_mock.patch.object(
ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id:
mock_conn_rec_retrieve_by_id.return_value = mock_conn
with self.assertRaises(BaseConnectionManagerError):
await self.manager.create_did_document(
did_info=did_info,
inbound_connection_id="dummy",
svc_endpoints=[TestConfig.test_endpoint],
)
async def test_create_did_document_no_services(self):
did_info = DIDInfo(
TestConfig.test_did,
TestConfig.test_verkey,
None,
)
mock_conn = async_mock.MagicMock(
connection_id="dummy",
inbound_connection_id=None,
their_did=TestConfig.test_target_did,
state=ConnRecord.State.COMPLETED.rfc23,
)
x_did_doc = self.make_did_doc(
did=TestConfig.test_target_did, verkey=TestConfig.test_target_verkey
)
x_did_doc._service = {}
for i in range(2): # first cover store-record, then update-value
await self.manager.store_did_document(x_did_doc)
with async_mock.patch.object(
ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id:
mock_conn_rec_retrieve_by_id.return_value = mock_conn
with self.assertRaises(BaseConnectionManagerError):
await self.manager.create_did_document(
did_info=did_info,
inbound_connection_id="dummy",
svc_endpoints=[TestConfig.test_endpoint],
)
async def test_create_did_document_no_service_endpoint(self):
did_info = DIDInfo(
TestConfig.test_did,
TestConfig.test_verkey,
None,
)
mock_conn = async_mock.MagicMock(
connection_id="dummy",
inbound_connection_id=None,
their_did=TestConfig.test_target_did,
state=ConnRecord.State.COMPLETED.rfc23,
)
x_did_doc = self.make_did_doc(
did=TestConfig.test_target_did, verkey=TestConfig.test_target_verkey
)
x_did_doc._service = {}
x_did_doc.set(
Service(TestConfig.test_target_did, "dummy", "IndyAgent", [], [], "", 0)
)
for i in range(2): # first cover store-record, then update-value
await self.manager.store_did_document(x_did_doc)
with async_mock.patch.object(
ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id:
mock_conn_rec_retrieve_by_id.return_value = mock_conn
with self.assertRaises(BaseConnectionManagerError):
await self.manager.create_did_document(
did_info=did_info,
inbound_connection_id="dummy",
svc_endpoints=[TestConfig.test_endpoint],
)
async def test_create_did_document_no_service_recip_keys(self):
did_info = DIDInfo(
TestConfig.test_did,
TestConfig.test_verkey,
None,
)
mock_conn = async_mock.MagicMock(
connection_id="dummy",
inbound_connection_id=None,
their_did=TestConfig.test_target_did,
state=ConnRecord.State.COMPLETED.rfc23,
)
x_did_doc = self.make_did_doc(
did=TestConfig.test_target_did, verkey=TestConfig.test_target_verkey
)
x_did_doc._service = {}
x_did_doc.set(
Service(
TestConfig.test_target_did,
"dummy",
"IndyAgent",
[],
[],
TestConfig.test_endpoint,
0,
)
)
for i in range(2): # first cover store-record, then update-value
await self.manager.store_did_document(x_did_doc)
with async_mock.patch.object(
ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id:
mock_conn_rec_retrieve_by_id.return_value = mock_conn
with self.assertRaises(BaseConnectionManagerError):
await self.manager.create_did_document(
did_info=did_info,
inbound_connection_id="dummy",
svc_endpoints=[TestConfig.test_endpoint],
)
async def test_did_key_storage(self):
did_info = DIDInfo(
TestConfig.test_did,
TestConfig.test_verkey,
None,
)
did_doc = self.make_did_doc(
did=TestConfig.test_target_did, verkey=TestConfig.test_target_verkey
)
await self.manager.add_key_for_did(
did=TestConfig.test_target_did, key=TestConfig.test_target_verkey
)
did = await self.manager.find_did_for_key(key=TestConfig.test_target_verkey)
assert did == TestConfig.test_target_did
await self.manager.remove_keys_for_did(TestConfig.test_target_did)
async def test_diddoc_connection_targets_diddoc(self):
did_doc = self.make_did_doc(
TestConfig.test_target_did,
TestConfig.test_target_verkey,
)
targets = self.manager.diddoc_connection_targets(
did_doc,
TestConfig.test_verkey,
)
assert isinstance(targets[0], ConnectionTarget)
async def test_diddoc_connection_targets_diddoc_underspecified(self):
with self.assertRaises(BaseConnectionManagerError):
self.manager.diddoc_connection_targets(None, TestConfig.test_verkey)
x_did_doc = DIDDoc(did=None)
with self.assertRaises(BaseConnectionManagerError):
self.manager.diddoc_connection_targets(x_did_doc, TestConfig.test_verkey)
x_did_doc = self.make_did_doc(
did=TestConfig.test_target_did, verkey=TestConfig.test_target_verkey
)
x_did_doc._service = {}
with self.assertRaises(BaseConnectionManagerError):
self.manager.diddoc_connection_targets(x_did_doc, TestConfig.test_verkey)
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.quicksight import QuickSightHook
from airflow.providers.amazon.aws.hooks.sts import StsHook
from airflow.providers.amazon.aws.sensors.quicksight import QuickSightSensor
AWS_ACCOUNT_ID = "123456789012"
DATA_SET_ID = "DemoDataSet"
INGESTION_ID = "DemoDataSet_Ingestion"
class TestQuickSightSensor(unittest.TestCase):
def setUp(self):
self.sensor = QuickSightSensor(
task_id="test_quicksight_sensor",
aws_conn_id="aws_default",
            data_set_id=DATA_SET_ID,
            ingestion_id=INGESTION_ID,
)
@mock.patch.object(QuickSightHook, "get_status")
@mock.patch.object(StsHook, "get_conn")
@mock.patch.object(StsHook, "get_account_number")
def test_poke_success(self, mock_get_account_number, sts_conn, mock_get_status):
mock_get_account_number.return_value = AWS_ACCOUNT_ID
mock_get_status.return_value = "COMPLETED"
self.assertTrue(self.sensor.poke({}))
mock_get_status.assert_called_once_with(AWS_ACCOUNT_ID, DATA_SET_ID, INGESTION_ID)
@mock.patch.object(QuickSightHook, "get_status")
@mock.patch.object(StsHook, "get_conn")
@mock.patch.object(StsHook, "get_account_number")
def test_poke_cancelled(self, mock_get_account_number, sts_conn, mock_get_status):
mock_get_account_number.return_value = AWS_ACCOUNT_ID
mock_get_status.return_value = "CANCELLED"
with self.assertRaises(AirflowException):
self.sensor.poke({})
mock_get_status.assert_called_once_with(AWS_ACCOUNT_ID, DATA_SET_ID, INGESTION_ID)
@mock.patch.object(QuickSightHook, "get_status")
@mock.patch.object(StsHook, "get_conn")
@mock.patch.object(StsHook, "get_account_number")
def test_poke_failed(self, mock_get_account_number, sts_conn, mock_get_status):
mock_get_account_number.return_value = AWS_ACCOUNT_ID
mock_get_status.return_value = "FAILED"
with self.assertRaises(AirflowException):
self.sensor.poke({})
mock_get_status.assert_called_once_with(AWS_ACCOUNT_ID, DATA_SET_ID, INGESTION_ID)
@mock.patch.object(QuickSightHook, "get_status")
@mock.patch.object(StsHook, "get_conn")
@mock.patch.object(StsHook, "get_account_number")
def test_poke_initialized(self, mock_get_account_number, sts_conn, mock_get_status):
mock_get_account_number.return_value = AWS_ACCOUNT_ID
mock_get_status.return_value = "INITIALIZED"
self.assertFalse(self.sensor.poke({}))
mock_get_status.assert_called_once_with(AWS_ACCOUNT_ID, DATA_SET_ID, INGESTION_ID)
|
# -*- coding: utf-8 -*-
import re
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
logger = logging.getLogger(__name__)
class DisallowDomainMiddleware(object):
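    """Spider middleware that drops requests to domains a spider lists in its
    ``disallowed_domains`` attribute (the inverse of an offsite/allow policy).
    Filtered requests are reported through the ``offsite/domains`` and
    ``offsite/filtered`` stats.
    """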
def __init__(self, stats): # type: ignore
self.stats = stats
@classmethod
def from_crawler(cls, crawler): # type: ignore
o = cls(crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def process_spider_output(self, response, result, spider): # type: ignore
for x in result:
if isinstance(x, Request):
if x.dont_filter or self.should_follow(x, spider):
yield x
else:
domain = urlparse_cached(x).hostname
if domain and domain not in self.domains_seen:
self.domains_seen.add(domain)
logger.debug("Filtered offsite request to %(domain)r: %(request)s",
{'domain': domain, 'request': x}, extra={'spider': spider})
self.stats.inc_value('offsite/domains', spider=spider)
self.stats.inc_value('offsite/filtered', spider=spider)
else:
yield x
def should_follow(self, request, spider): # type: ignore
regex = self.host_regex
if regex is None:
return True
# hostname can be None for wrong urls (like javascript links)
host = urlparse_cached(request).hostname or ''
return not bool(regex.search(host))
    def get_host_regex(self, spider):  # type: ignore
        """Override this method to implement a different offsite policy"""
        disallowed_domains = getattr(spider, 'disallowed_domains', None)
        if not disallowed_domains:
            return None  # allow all by default
        url_pattern = re.compile("^https?://.*$")
        domains = []
        for domain in disallowed_domains:
            if domain is not None and url_pattern.match(domain):
                # Warn and skip URL entries so they never reach the domain regex.
                warnings.warn("disallowed_domains accepts only domains, not URLs. "
                              "Ignoring URL entry %s in disallowed_domains." % domain,
                              URLWarning)
            elif domain is not None:
                domains.append(re.escape(domain))
        if not domains:
            return None  # nothing valid to disallow
        regex = r'^(.*\.)?(%s)$' % '|'.join(domains)
        return re.compile(regex)
def spider_opened(self, spider): # type: ignore
self.host_regex = self.get_host_regex(spider)
self.domains_seen = set() # type: ignore
class URLWarning(Warning):
pass
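# Illustrative usage sketch (not part of the module above): a spider opts in by
# declaring ``disallowed_domains`` and enabling the middleware in its settings.
# The dotted path "myproject.middlewares.DisallowDomainMiddleware" is an assumed
# project layout, not something defined here.
import scrapy
class ExampleSpider(scrapy.Spider):
    name = "example"
    start_urls = ["https://example.org/"]
    disallowed_domains = ["ads.example.com", "tracker.example.net"]
    custom_settings = {
        "SPIDER_MIDDLEWARES": {
            "myproject.middlewares.DisallowDomainMiddleware": 550,
        }
    }
    def parse(self, response):
        # Requests to the disallowed domains yielded here are dropped by the
        # middleware before they reach the scheduler.
        yield from response.follow_all(css="a")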
|
import unittest
from onedrive_client import od_stringutils
class TestStringUtils(unittest.TestCase):
INCREMENTED_FILE_NAMES = (('Folder', 'Folder 1'), ('Folder 1', 'Folder 2'),
('file.txt', 'file 1.txt'), ('file 1.txt', 'file 2.txt'),
('Folder 0', 'Folder 0 1'))
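    # Each (original, expected) pair above encodes the renaming rule: a " 1"
    # counter is appended before any file extension, an existing positive
    # counter is incremented, and a trailing " 0" is treated as part of the
    # base name rather than as a counter.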
def test_get_filename_with_incremented_count(self):
for orig, exp in self.INCREMENTED_FILE_NAMES:
self.assertEqual(exp, od_stringutils.get_filename_with_incremented_count(orig))
if __name__ == '__main__':
unittest.main()
|
from tf_bodypix.utils.v4l2 import VideoLoopbackImageSink
from .api import T_OutputSink
def get_v4l2_output_sink(device_name: str, **__) -> T_OutputSink:
return VideoLoopbackImageSink(device_name)
OUTPUT_SINK_FACTORY = get_v4l2_output_sink
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class MatMulTest(tf.test.TestCase):
def _testCpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
np_ans = x_mat * y_mat
with self.test_session(use_gpu=False):
tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
self.assertAllClose(np_ans, tf_ans)
self.assertAllEqual(np_ans.shape, tf_ans.shape)
def _testGpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
np_ans = x_mat * y_mat
with self.test_session(use_gpu=True):
tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
self.assertAllClose(np_ans, tf_ans)
self.assertAllEqual(np_ans.shape, tf_ans.shape)
def _randMatrix(self, rows, cols, dtype):
if dtype is np.complex64:
real = self._randMatrix(rows, cols, np.float32)
imag = self._randMatrix(rows, cols, np.float32)
return real + np.complex(0, 1) * imag
else:
return np.random.uniform(low=1.0, high=100.0, size=rows * cols).reshape(
[rows, cols]).astype(dtype)
  # Basic test:
  #   [ [1],
  #     [2],
  #     [3],  *  [1, 2]
  #     [4] ]
def testFloatBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.float32)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testDoubleBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.float64)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.float64)
self._testCpuMatmul(x, y)
def testInt32Basic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.int32)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.int32)
self._testCpuMatmul(x, y)
def testSComplexBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.complex64)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.complex64)
self._testCpuMatmul(x, y)
# Tests testing random sized matrices.
def testFloatRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testDoubleRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.float64)
y = self._randMatrix(k, m, np.float64)
self._testCpuMatmul(x, y)
def testInt32Random(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.int32)
y = self._randMatrix(k, m, np.int32)
self._testCpuMatmul(x, y)
def testSComplexRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.complex64)
y = self._randMatrix(k, m, np.complex64)
self._testCpuMatmul(x, y)
# Test the cases that transpose the matrices before multiplying.
# NOTE(keveman): The cases where only one of the inputs is
# transposed are covered by tf.matmul's gradient function.
def testFloatRandomTransposeBoth(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(k, n, np.float32)
y = self._randMatrix(m, k, np.float32)
self._testCpuMatmul(x, y, True, True)
self._testGpuMatmul(x, y, True, True)
  def testDoubleRandomTransposeBoth(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(k, n, np.float64)
y = self._randMatrix(m, k, np.float64)
self._testCpuMatmul(x, y, True, True)
def testMatMul_OutEmpty_A(self):
n, k, m = 0, 8, 3
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testMatMul_OutEmpty_B(self):
n, k, m = 3, 8, 0
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testMatMul_Inputs_Empty(self):
n, k, m = 3, 0, 4
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testShapeErrors(self):
a = tf.placeholder(tf.float32, [32, 37])
b = tf.placeholder(tf.float32, [36, 2])
c = tf.placeholder(tf.float32, [37])
with self.assertRaisesRegexp(
ValueError,
r"Dimensions Dimension\(37\) and Dimension\(36\) are not compatible"):
tf.matmul(a, b)
with self.assertRaisesRegexp(ValueError, "must have rank 2"):
tf.matmul(a, c)
# TODO(zhifengc): Figures out how to test matmul gradients on GPU.
class MatMulGradientTest(tf.test.TestCase):
def testGradientInput0(self):
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=[2, 4], dtype=tf.float64, name="y")
m = tf.matmul(x, y, name="matmul")
err = tf.test.compute_gradient_error(x, [3, 2], m, [3, 4])
print("matmul input0 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput1(self):
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=[2, 4], dtype=tf.float64, name="y")
m = tf.matmul(x, y, name="matmul")
err = tf.test.compute_gradient_error(y, [2, 4], m, [3, 4])
print("matmul input1 gradient err = ", err)
self.assertLess(err, 1e-10)
def _VerifyInput0(self, transpose_a, transpose_b):
shape_x = [3, 2]
shape_y = [2, 4]
if transpose_a:
shape_x = list(reversed(shape_x))
if transpose_b:
shape_y = list(reversed(shape_y))
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=shape_y, dtype=tf.float64, name="y")
m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
err = tf.test.compute_gradient_error(x, shape_x, m, [3, 4])
print("matmul input0 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput0WithTranspose(self):
self._VerifyInput0(transpose_a=True, transpose_b=False)
self._VerifyInput0(transpose_a=False, transpose_b=True)
self._VerifyInput0(transpose_a=True, transpose_b=True)
def _VerifyInput1(self, transpose_a, transpose_b):
shape_x = [3, 2]
shape_y = [2, 4]
if transpose_a:
shape_x = list(reversed(shape_x))
if transpose_b:
shape_y = list(reversed(shape_y))
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=shape_y, dtype=tf.float64, name="y")
m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
err = tf.test.compute_gradient_error(y, shape_y, m, [3, 4])
print("matmul input1 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput1WithTranspose(self):
self._VerifyInput1(transpose_a=True, transpose_b=False)
self._VerifyInput1(transpose_a=False, transpose_b=True)
self._VerifyInput1(transpose_a=True, transpose_b=True)
if __name__ == "__main__":
tf.test.main()
|
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
def test_str_split():
"Test wrapper for Pandas ``.str.split()`` method."
df = pd.DataFrame(
{"text": ["a_b_c", "c_d_e", np.nan, "f_g_h"], "numbers": range(1, 5)}
)
expected = df.copy()
expected["text"] = expected["text"].str.split("_")
result = df.process_text(column="text", string_function="split", pat="_")
assert_frame_equal(result, expected)
def test_str_cat():
"Test wrapper for Pandas ``.str.cat()`` method."
df = pd.DataFrame({"text": ["a", "b", "c", "d"], "numbers": range(1, 5)})
expected = df.copy()
expected["text"] = expected["text"].str.cat(others=["A", "B", "C", "D"])
result = df.process_text(
column="text", string_function="cat", others=["A", "B", "C", "D"],
)
assert_frame_equal(result, expected)
def test_str_get():
"""Test wrapper for Pandas ``.str.get()`` method."""
df = pd.DataFrame(
{"text": ["aA", "bB", "cC", "dD"], "numbers": range(1, 5)}
)
expected = df.copy()
expected["text"] = expected["text"].str.get(1)
result = df.process_text(column="text", string_function="get", i=-1)
assert_frame_equal(result, expected)
def test_str_lower():
"""Test string conversion to lowercase using ``.str.lower()``."""
df = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"Graham Chapman",
"John Cleese",
"Terry Gilliam",
"Eric Idle",
"Terry Jones",
"Michael Palin",
],
}
)
expected = df.copy()
expected["names"] = expected["names"].str.lower()
result = df.process_text(column="names", string_function="lower")
assert_frame_equal(result, expected)
def test_str_wrong():
"""Test that an invalid Pandas string method raises an exception."""
df = pd.DataFrame(
{"text": ["ragnar", "sammywemmy", "ginger"], "code": [1, 2, 3]}
)
with pytest.raises(KeyError):
df.process_text(column="text", string_function="invalid_function")
def test_str_wrong_parameters():
"""Test that invalid argument for Pandas string method raises an error."""
df = pd.DataFrame(
{"text": ["a_b_c", "c_d_e", np.nan, "f_g_h"], "numbers": range(1, 5)}
)
with pytest.raises(TypeError):
df.process_text(column="text", string_function="split", pattern="_")
|
apiAttachAvailable = u'API verf\xfcgbar'
apiAttachNotAvailable = u'Nicht verf\xfcgbar'
apiAttachPendingAuthorization = u'Ausstehende Genehmigungsanfrage'
apiAttachRefused = u'Abgelehnt'
apiAttachSuccess = u'Erfolg'
apiAttachUnknown = u'Unbekannt'
budDeletedFriend = u'Aus Freundesliste gel\xf6scht'
budFriend = u'Freund'
budNeverBeenFriend = u'War noch nie in Freundesliste'
budPendingAuthorization = u'Ausstehende Genehmigungsanfrage'
budUnknown = u'Unbekannt'
cfrBlockedByRecipient = u'Anruf von Empf\xe4nger gesperrt'
cfrMiscError = u'Sonstiger Fehler'
cfrNoCommonCodec = u'Kein Standard-Codec'
cfrNoProxyFound = u'Kein Proxy gefunden'
cfrNotAuthorizedByRecipient = u'Aktueller Benutzer nicht von Empf\xe4nger genehmigt'
cfrRecipientNotFriend = u'Empf\xe4nger kein Freund'
cfrRemoteDeviceError = u'Problem mit dem Audioger\xe4t der Gegenstelle'
cfrSessionTerminated = u'Sitzung beendet'
cfrSoundIOError = u'Ton-E/A-Fehler'
cfrSoundRecordingError = u'Aufnahmefehler'
cfrUnknown = u'Unbekannt'
cfrUserDoesNotExist = u'Benutzer/Telefonnummer gibt es nicht'
cfrUserIsOffline = u'Sie oder er ist offline'
chsAllCalls = u'Dialog mit Altversion'
chsDialog = u'Dialog'
chsIncomingCalls = u'Mehrere m\xfcssen annehmen'
chsLegacyDialog = u'Dialog mit Altversion'
chsMissedCalls = u'Dialog'
chsMultiNeedAccept = u'Mehrere m\xfcssen annehmen'
chsMultiSubscribed = u'Mehrere abonniert'
chsOutgoingCalls = u'Mehrere abonniert'
chsUnknown = u'Unbekannt'
chsUnsubscribed = u'Abonnement gek\xfcndigt'
clsBusy = u'Besetzt'
clsCancelled = u'Abbrechen'
clsEarlyMedia = u'Wiedergabe von Early Media'
clsFailed = u'Anruf leider fehlgeschlagen!'
clsFinished = u'Beendet'
clsInProgress = u'Aktives Gespr\xe4ch'
clsLocalHold = u'In lokaler Wartestellung'
clsMissed = u'Anruf in Abwesenheit von:'
clsOnHold = u'Konferenz wird gehalten'
clsRefused = u'Abgelehnt'
clsRemoteHold = u'In Fern-Wartestellung'
clsRinging = u'Gespr\xe4che'
clsRouting = u'Wird weitergeleitet'
clsTransferred = u'Unbekannt'
clsTransferring = u'Unbekannt'
clsUnknown = u'Unbekannt'
clsUnplaced = u'Nie get\xe4tigt'
clsVoicemailBufferingGreeting = u'Ansage wird gepuffert'
clsVoicemailCancelled = u'Voicemail wurde abgebrochen'
clsVoicemailFailed = u'Fehler bei Sprachnachricht'
clsVoicemailPlayingGreeting = u'Ansage wird abgespielt'
clsVoicemailRecording = u'Sprachnachricht wird aufgezeichnet'
clsVoicemailSent = u'Voicemail wurde gesendet'
clsVoicemailUploading = u'Voicemail wird hochgeladen'
cltIncomingP2P = u'Eingehender P2P-Anruf'
cltIncomingPSTN = u'Eingehender Anruf'
cltOutgoingP2P = u'Ausgehender P2P-Anruf'
cltOutgoingPSTN = u'Ausgehender Anruf'
cltUnknown = u'Unbekannt'
cmeAddedMembers = u'Hinzugef\xfcgte Mitglieder'
cmeCreatedChatWith = u'Chat erstellt mit'
cmeEmoted = u'Unbekannt'
cmeLeft = u'Links'
cmeSaid = u'Gesagt'
cmeSawMembers = u'Gesehene Mitglieder'
cmeSetTopic = u'Thema festlegen'
cmeUnknown = u'Unbekannt'
cmsRead = u'Gelesen'
cmsReceived = u'Empfangen'
cmsSending = u'Sende...'
cmsSent = u'Gesendet'
cmsUnknown = u'Unbekannt'
conConnecting = u'Verbindungsaufbau'
conOffline = u'Offline'
conOnline = u'Online'
conPausing = u'Wird angehalten'
conUnknown = u'Unbekannt'
cusAway = u'Abwesend'
cusDoNotDisturb = u'Besch\xe4ftigt'
cusInvisible = u'Als offline anzeigen'
cusLoggedOut = u'Offline'
cusNotAvailable = u'Nicht verf\xfcgbar'
cusOffline = u'Offline'
cusOnline = u'Online'
cusSkypeMe = u'Skype Me-Modus'
cusUnknown = u'Unbekannt'
cvsBothEnabled = u'Video wird gesendet und empfangen'
cvsNone = u'Kein Video'
cvsReceiveEnabled = u'Video wird empfangen'
cvsSendEnabled = u'Video wird gesendet'
cvsUnknown = u''
grpAllFriends = u'Alle Freunde'
grpAllUsers = u'Alle Benutzer'
grpCustomGroup = u'Benutzerdefiniert'
grpOnlineFriends = u'Online-Freunde'
grpPendingAuthorizationFriends = u'Ausstehende Genehmigungsanfrage'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'K\xfcrzlich kontaktierte Benutzer'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'Skype-Freunde'
grpSkypeOutFriends = u'SkypeOut-Freunde'
grpUngroupedFriends = u'Nicht gruppierte Freunde'
grpUnknown = u'Unbekannt'
grpUsersAuthorizedByMe = u'Von mir genehmigt'
grpUsersBlockedByMe = u'Von mir blockiert'
grpUsersWaitingMyAuthorization = u'Warten auf meine Genehmigung'
leaAddDeclined = u'Hinzuf\xfcgung abgelehnt'
leaAddedNotAuthorized = u'Hinzugef\xfcgter Benutzer muss genehmigt sein'
leaAdderNotFriend = u'Hinzuf\xfcgender Benutzer muss Freund sein'
leaUnknown = u'Unbekannt'
leaUnsubscribe = u'Abonnement gek\xfcndigt'
leaUserIncapable = u'Benutzer unf\xe4hig'
leaUserNotFound = u'Kein Benutzer gefunden'
olsAway = u'Abwesend'
olsDoNotDisturb = u'Besch\xe4ftigt'
olsNotAvailable = u'Nicht verf\xfcgbar'
olsOffline = u'Offline'
olsOnline = u'Online'
olsSkypeMe = u'Skype Me-Modus'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'Unbekannt'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'Weiblich'
usexMale = u'M\xe4nnlich'
usexUnknown = u'Unbekannt'
vmrConnectError = u'Verbindungsfehler'
vmrFileReadError = u'Fehler beim Lesen der Datei'
vmrFileWriteError = u'Fehler beim Schreiben in die Datei'
vmrMiscError = u'Sonstiger Fehler'
vmrNoError = u'Kein Fehler'
vmrNoPrivilege = u'Keine Voicemail-Berechtigung'
vmrNoVoicemail = u'Voicemail gibt es nicht'
vmrPlaybackError = u'Fehler bei der Wiedergabe'
vmrRecordingError = u'Fehler bei der Aufnahme'
vmrUnknown = u'Unbekannt'
vmsBlank = u'Leer'
vmsBuffering = u'Pufferung'
vmsDeleting = u'Wird gel\xf6scht'
vmsDownloading = u'Download l\xe4uft'
vmsFailed = u'Fehlgeschlagen'
vmsNotDownloaded = u'Nicht gedownloadet'
vmsPlayed = u'Abgespielt'
vmsPlaying = u'Wiedergabe'
vmsRecorded = u'Aufgenommen'
vmsRecording = u'Sprachnachricht wird aufgezeichnet'
vmsUnknown = u'Unbekannt'
vmsUnplayed = u'Nicht abgespielt'
vmsUploaded = u'Upload beendet'
vmsUploading = u'Upload'
vmtCustomGreeting = u'Benutzerdefinierte Ansage'
vmtDefaultGreeting = u'Standardansage'
vmtIncoming = u'Ich eine Sprachnachricht empfange'
vmtOutgoing = u'Ausgehend'
vmtUnknown = u'Unbekannt'
vssAvailable = u'Verf\xfcgbar'
vssNotAvailable = u'Nicht verf\xfcgbar'
vssPaused = u'Angehalten'
vssRejected = u'Abgelehnt'
vssRunning = u'Wird ausgef\xfchrt'
vssStarting = u'Wird gestartet'
vssStopping = u'Wird gestoppt'
vssUnknown = u'Unbekannt'
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the staff detectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import tensorflow as tf
from moonlight import image as omr_image
from moonlight import staves
from moonlight.staves import staffline_distance
from moonlight.staves import testing
class StaffDetectorsTest(tf.test.TestCase):
def setUp(self):
# The normal _MIN_STAFFLINE_DISTANCE_SCORE is too large for the small images
# used in unit tests.
self.old_min_staffline_distance_score = (
staffline_distance._MIN_STAFFLINE_DISTANCE_SCORE)
staffline_distance._MIN_STAFFLINE_DISTANCE_SCORE = 10
def tearDown(self):
staffline_distance._MIN_STAFFLINE_DISTANCE_SCORE = (
self.old_min_staffline_distance_score)
def test_single_staff(self):
blank_row = [255] * 50
staff_row = [255] * 4 + [0] * 42 + [255] * 4
# Create an image with 5 staff lines, with a slightly noisy staffline
# thickness and distance.
image = np.asarray([blank_row] * 25 + [staff_row] * 2 + [blank_row] * 8 +
[staff_row] * 3 + [blank_row] * 8 + [staff_row] * 3 +
[blank_row] * 9 + [staff_row] * 2 + [blank_row] * 8 +
[staff_row] * 2 + [blank_row] * 25, np.uint8)
for detector in self.generate_staff_detectors(image):
with self.test_session() as sess:
staves_arr, staffline_distances, staffline_thickness = sess.run(
(detector.staves, detector.staffline_distance,
detector.staffline_thickness))
expected_y = 25 + 2 + 8 + 3 + 8 + 1 # y coordinate of the center line
self.assertEqual(
staves_arr.shape[0], 1,
'Expected single staff from detector %s. Got: %d' %
(detector, staves_arr.shape[0]))
self.assertAlmostEqual(
np.mean(staves_arr[0, :, 1]), # average y position
expected_y,
delta=2.0)
self.assertAlmostEqual(staffline_distances[0], 11, delta=1.0)
self.assertLessEqual(staffline_thickness, 3)
def test_corpus_image(self):
# Test only the default staff detector (because projection won't detect all
# staves).
filename = os.path.join(tf.resource_loader.get_data_files_path(),
'../testdata/IMSLP00747-000.png')
image_t = omr_image.decode_music_score_png(tf.read_file(filename))
detector = staves.StaffDetector(image_t)
with self.test_session() as sess:
staves_arr, staffline_distances = sess.run(
[detector.staves, detector.staffline_distance])
self.assertAllClose(
np.mean(staves_arr[:, :, 1], axis=1), # average y position
[413, 603, 848, 1040, 1286, 1476, 1724, 1915, 2162, 2354, 2604, 2795],
atol=5)
self.assertAllEqual(staffline_distances, [16] * 12)
def test_staves_interpolated_y(self):
# Test staff center line interpolation.
# The sequence of (x, y) points always starts at x = 0 and ends at
# x = width - 1.
staff = tf.constant(
np.array([[[0, 10], [5, 5], [11, 0], [15, 10], [20, 20], [23, 49]]],
np.int32))
with self.test_session():
line_y = testing.FakeStaves(tf.zeros([50, 24]),
staff).staves_interpolated_y[0].eval()
      self.assertEqual(
list(line_y), [
10, 9, 8, 7, 6, 5, 4, 3, 3, 2, 1, 0, 2, 5, 8, 10, 12, 14, 16, 18,
20, 30, 39, 49
])
def test_staves_interpolated_y_empty(self):
with self.test_session():
self.assertAllEqual(
testing.FakeStaves(tf.zeros([50, 25]), tf.zeros(
[0, 2, 2], np.int32)).staves_interpolated_y.eval().shape, [0, 25])
def test_staves_interpolated_y_staves_dont_extend_to_edge(self):
staff = tf.constant(np.array([[[5, 10], [12, 8]]], np.int32))
with self.test_session():
# The y values should extend past the endpoints to the edge of the image,
# and should be equal to the y value at the nearest endpoint.
self.assertAllEqual(
testing.FakeStaves(tf.zeros([50, 15]),
staff).staves_interpolated_y[0].eval(),
[10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8])
def generate_staff_detectors(self, image):
yield staves.ProjectionStaffDetector(image)
yield staves.FilteredHoughStaffDetector(image)
if __name__ == '__main__':
tf.test.main()
|
# Generated by Django 3.0.3 on 2020-10-13 10:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelzoo', '0021_auto_20201012_1423'),
]
operations = [
migrations.AddField(
model_name='model',
name='github',
field=models.CharField(blank=True, default='', max_length=100),
),
]
|
"""
* Definition for singly-linked list.
* function ListNode(val, next) {
* this.val = (val===undefined ? 0 : val)
* this.next = (next===undefined ? null : next)
* }
* @param {ListNode} l1
* @param {ListNode} l2
* @return {ListNode}
"""
def addTwoNumbers(l1, l2):
pass
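# A minimal sketch of one common approach: digits are stored in reverse order
# and summed with a running carry. The ListNode class below is an assumed
# Python counterpart of the definition quoted in the docstring above and is
# not part of the original template.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


def add_two_numbers_sketch(l1, l2):
    dummy = ListNode()
    tail, carry = dummy, 0
    while l1 or l2 or carry:
        total = (l1.val if l1 else 0) + (l2.val if l2 else 0) + carry
        carry, digit = divmod(total, 10)
        tail.next = ListNode(digit)
        tail = tail.next
        l1 = l1.next if l1 else None
        l2 = l2.next if l2 else None
    return dummy.next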
|
from setuptools import setup
setup(
name='get-bittrex-tick',
version='1.0',
description='A useful module',
author='Shadowist',
author_email='shadowist@protonmail.com',
packages=['.'], #same as name
install_requires=["requests"], #external packages as dependencies
)
|
import logging
import tempfile
import time
from ocs_ci.ocs.ui.views import locators, osd_sizes
from ocs_ci.ocs.ui.base_ui import PageNavigator
from ocs_ci.utility.utils import get_ocp_version, TimeoutSampler, run_cmd
from ocs_ci.utility import templating
from ocs_ci.ocs.exceptions import TimeoutExpiredError
from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.node import get_worker_nodes
from ocs_ci.deployment.helpers.lso_helpers import add_disk_for_vsphere_platform
logger = logging.getLogger(__name__)
class DeploymentUI(PageNavigator):
"""
Deployment OCS Operator via User Interface
"""
def __init__(self, driver):
super().__init__(driver)
self.ocp_version = get_ocp_version()
self.dep_loc = locators[self.ocp_version]["deployment"]
def verify_disks_lso_attached(self, timeout=600, sleep=20):
"""
Verify Disks Attached
Args:
timeout (int): Time in seconds to wait
sleep (int): Sampling time in seconds
"""
osd_size = config.ENV_DATA.get("device_size", defaults.DEVICE_SIZE)
number_worker_nodes = get_worker_nodes()
capacity = int(osd_size) * len(number_worker_nodes)
if capacity >= 1024:
capacity_str = str(capacity / 1024).rstrip("0").rstrip(".") + " TiB"
else:
capacity_str = str(capacity) + " GiB"
sample = TimeoutSampler(
timeout=timeout,
sleep=sleep,
func=self.check_element_text,
expected_text=capacity_str,
)
if not sample.wait_for_func_status(result=True):
logger.error(f" after {timeout} seconds")
raise TimeoutExpiredError
def create_catalog_source_yaml(self):
"""
Create OLM YAML file
"""
try:
catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
image = config.DEPLOYMENT.get(
"ocs_registry_image", config.DEPLOYMENT["default_ocs_registry_image"]
)
catalog_source_data["spec"]["image"] = image
catalog_source_manifest = tempfile.NamedTemporaryFile(
mode="w+", prefix="catalog_source_manifest", delete=False
)
templating.dump_data_to_temp_yaml(
catalog_source_data, catalog_source_manifest.name
)
run_cmd(f"oc create -f {catalog_source_manifest.name}", timeout=300)
run_cmd(f"oc create -f {constants.OLM_YAML}", timeout=300)
time.sleep(60)
except Exception as e:
logger.info(e)
def install_ocs_operator(self):
"""
        Install OCS Operator
"""
self.navigate_operatorhub_page()
logger.info("Search OCS Operator")
self.do_send_keys(
self.dep_loc["search_operators"], text="OpenShift Container Storage"
)
logger.info("Choose OCS Version")
self.do_click(self.dep_loc["choose_ocs_version"], enable_screenshot=True)
logger.info("Click Install OCS")
self.do_click(self.dep_loc["click_install_ocs"], enable_screenshot=True)
self.do_click(self.dep_loc["click_install_ocs_page"], enable_screenshot=True)
self.verify_operator_succeeded(operator="OpenShift Container Storage")
def install_local_storage_operator(self):
"""
Install local storage operator
"""
if config.DEPLOYMENT.get("local_storage"):
self.navigate_operatorhub_page()
logger.info("Search OCS Operator")
self.do_send_keys(self.dep_loc["search_operators"], text="Local Storage")
logger.info("Choose Local Storage Version")
self.do_click(
self.dep_loc["choose_local_storage_version"], enable_screenshot=True
)
logger.info("Click Install LSO")
self.do_click(self.dep_loc["click_install_lso"], enable_screenshot=True)
self.do_click(
self.dep_loc["click_install_lso_page"], enable_screenshot=True
)
self.verify_operator_succeeded(operator="Local Storage")
def install_storage_cluster(self):
"""
Install Storage Cluster
"""
self.search_operator_installed_operators_page()
logger.info("Click on ocs operator on Installed Operators")
self.do_click(
locator=self.dep_loc["ocs_operator_installed"], enable_screenshot=True
)
logger.info("Click on Storage Cluster")
self.do_click(
locator=self.dep_loc["storage_cluster_tab"], enable_screenshot=True
)
logger.info("Click on Create Storage Cluster")
self.refresh_page()
self.do_click(
locator=self.dep_loc["create_storage_cluster"], enable_screenshot=True
)
if config.DEPLOYMENT.get("local_storage"):
self.install_lso_cluster()
else:
self.install_internal_cluster()
def install_lso_cluster(self):
"""
Install LSO cluster via UI
"""
logger.info("Click Internal - Attached Devices")
self.do_click(self.dep_loc["internal-attached_devices"], enable_screenshot=True)
logger.info("Click on All nodes")
self.do_click(self.dep_loc["all_nodes_lso"], enable_screenshot=True)
self.do_click(self.dep_loc["next"], enable_screenshot=True)
logger.info(
f"Configure Volume Set Name and Storage Class Name as {constants.LOCAL_BLOCK_RESOURCE}"
)
self.do_send_keys(
locator=self.dep_loc["lv_name"], text=constants.LOCAL_BLOCK_RESOURCE
)
self.do_send_keys(
locator=self.dep_loc["sc_name"], text=constants.LOCAL_BLOCK_RESOURCE
)
logger.info("Select all nodes on 'Create Storage Class' step")
self.do_click(
locator=self.dep_loc["all_nodes_create_sc"], enable_screenshot=True
)
self.verify_disks_lso_attached()
self.do_click(self.dep_loc["next"], enable_screenshot=True)
logger.info("Confirm new storage class")
self.do_click(self.dep_loc["yes"], enable_screenshot=True)
sample = TimeoutSampler(
timeout=600,
sleep=10,
func=self.check_element_text,
expected_text="Memory",
)
if not sample.wait_for_func_status(result=True):
logger.error("Nodes not found after 600 seconds")
raise TimeoutExpiredError
logger.info(f"Select {constants.LOCAL_BLOCK_RESOURCE} storage class")
self.choose_expanded_mode(
mode=True, locator=self.dep_loc["storage_class_dropdown_lso"]
)
self.do_click(locator=self.dep_loc["localblock_sc"], enable_screenshot=True)
self.do_click(self.dep_loc["next"], enable_screenshot=True)
self.configure_encryption()
self.create_storage_cluster()
def install_internal_cluster(self):
"""
Install Internal Cluster
"""
logger.info("Click Internal")
self.do_click(locator=self.dep_loc["internal_mode"], enable_screenshot=True)
logger.info("Configure Storage Class (thin on vmware, gp2 on aws)")
self.do_click(
locator=self.dep_loc["storage_class_dropdown"], enable_screenshot=True
)
self.do_click(locator=self.dep_loc[self.storage_class], enable_screenshot=True)
device_size = str(config.ENV_DATA.get("device_size"))
osd_size = device_size if device_size in osd_sizes else "512"
logger.info(f"Configure OSD Capacity {osd_size}")
self.choose_expanded_mode(mode=True, locator=self.dep_loc["osd_size_dropdown"])
self.do_click(locator=self.dep_loc[osd_size], enable_screenshot=True)
logger.info("Select all worker nodes")
self.select_checkbox_status(status=True, locator=self.dep_loc["all_nodes"])
if self.ocp_version == "4.6" and config.ENV_DATA.get("encryption_at_rest"):
self.do_click(
locator=self.dep_loc["enable_encryption"], enable_screenshot=True
)
if self.ocp_version in ("4.7", "4.8"):
logger.info("Next on step 'Select capacity and nodes'")
self.do_click(locator=self.dep_loc["next"], enable_screenshot=True)
self.configure_encryption()
self.create_storage_cluster()
def create_storage_cluster(self):
"""
Review and Create storage cluster
"""
logger.info("Create on Review and create page")
self.do_click(locator=self.dep_loc["create_on_review"], enable_screenshot=True)
logger.info("Sleep 10 second after click on 'create storage cluster'")
time.sleep(10)
def configure_encryption(self):
"""
Configure Encryption
"""
if config.ENV_DATA.get("encryption_at_rest"):
logger.info("Enable OSD Encryption")
self.select_checkbox_status(
status=True, locator=self.dep_loc["enable_encryption"]
)
logger.info("Cluster-wide encryption")
self.select_checkbox_status(
status=True, locator=self.dep_loc["wide_encryption"]
)
self.do_click(self.dep_loc["next"], enable_screenshot=True)
def verify_operator_succeeded(
self, operator="OpenShift Container Storage", timeout_install=300, sleep=20
):
"""
Verify Operator Installation
Args:
operator (str): type of operator
timeout_install (int): Time in seconds to wait
sleep (int): Sampling time in seconds
"""
self.search_operator_installed_operators_page(operator=operator)
sample = TimeoutSampler(
timeout=timeout_install,
sleep=sleep,
func=self.check_element_text,
expected_text="Succeeded",
)
if not sample.wait_for_func_status(result=True):
logger.error(
f"{operator} Installation status is not Succeeded after {timeout_install} seconds"
)
raise TimeoutExpiredError
def search_operator_installed_operators_page(
self, operator="OpenShift Container Storage"
):
"""
Search Operator on Installed Operators Page
Args:
operator (str): type of operator
"""
self.navigate_operatorhub_page()
self.navigate_installed_operators_page()
logger.info(f"Search {operator} operator installed")
if self.ocp_version in ("4.7", "4.8"):
self.do_send_keys(
locator=self.dep_loc["search_operator_installed"],
text=operator,
)
# https://bugzilla.redhat.com/show_bug.cgi?id=1899200
elif self.ocp_version == "4.6":
self.do_click(self.dep_loc["project_dropdown"], enable_screenshot=True)
self.do_click(self.dep_loc[operator], enable_screenshot=True)
def install_ocs_ui(self):
"""
Install OCS via UI
"""
if config.DEPLOYMENT.get("local_storage"):
add_disk_for_vsphere_platform()
self.install_local_storage_operator()
self.create_catalog_source_yaml()
self.install_ocs_operator()
self.install_storage_cluster()
|
'''Spark Configuration
In this file we define the key configuration parameters for submitting Spark jobs. Spark can be run
in a variety of deployment contexts. See the Spark documentation at
https://spark.apache.org/docs/latest/submitting-applications.html for a more in-depth summary of
Spark deployment contexts and configuration.
'''
from dagster import Dict, Field, List, Path, String
from .types import SparkDeployMode
from .configs_spark import spark_config
def define_spark_config():
'''Spark configuration.
See the Spark documentation for reference:
https://spark.apache.org/docs/latest/submitting-applications.html
'''
master_url = Field(
String,
description='The master URL for the cluster (e.g. spark://23.195.26.187:7077)',
is_optional=False,
)
deploy_mode = Field(
SparkDeployMode,
description='''Whether to deploy your driver on the worker nodes (cluster) or locally as an
external client (client) (default: client). A common deployment strategy is to submit your
application from a gateway machine that is physically co-located with your worker machines
(e.g. Master node in a standalone EC2 cluster). In this setup, client mode is appropriate.
In client mode, the driver is launched directly within the spark-submit process which acts
as a client to the cluster. The input and output of the application is attached to the
console. Thus, this mode is especially suitable for applications that involve the REPL (e.g.
Spark shell).''',
is_optional=True,
)
application_jar = Field(
Path,
description='''Path to a bundled jar including your application and all
dependencies. The URL must be globally visible inside of your cluster, for
instance, an hdfs:// path or a file:// path that is present on all nodes.
''',
is_optional=False,
)
application_arguments = Field(
String,
description='Arguments passed to the main method of your main class, if any',
is_optional=True,
)
spark_home = Field(
String,
description='The path to your spark installation. Defaults to $SPARK_HOME at runtime if not provided.',
is_optional=True,
)
spark_outputs = Field(List[String], description='The outputs that this Spark job will produce')
return Field(
Dict(
fields={
'master_url': master_url,
'deploy_mode': deploy_mode,
'application_jar': application_jar,
'spark_conf': spark_config(),
'spark_home': spark_home,
'application_arguments': application_arguments,
'spark_outputs': spark_outputs,
}
)
)
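# Illustrative only: a dict of values satisfying the required fields above.
# The master URL (taken from the field description), the jar path, and the
# output name are placeholders; optional fields are omitted.
EXAMPLE_SPARK_CONFIG = {
    'master_url': 'spark://23.195.26.187:7077',
    'application_jar': 'hdfs:///jars/my_app.jar',
    'spark_outputs': ['output_table'],
}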
|
from .models import StreamItem
class ManyToManyDatabaseBackend(object):
def add_stream_item(self, users, content, created_at):
item = StreamItem.objects.create(content=content, created_at=created_at)
item.users.add(*[user.pk for user in users])
def get_stream_items(self, user):
return StreamItem.objects.filter(users=user)
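# For reference, a minimal StreamItem model that would support the backend above
# (a hypothetical sketch; the real definition lives in .models):
#
#     from django.conf import settings
#     from django.db import models
#
#     class StreamItem(models.Model):
#         users = models.ManyToManyField(settings.AUTH_USER_MODEL)
#         content = models.TextField()
#         created_at = models.DateTimeField()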
|
# Splits the text sent by the user into the form "command rest".
# Examples:
# "/comando ASDASd"
# returns ("comando", "ASDASd")
#
# "/comando@MemesBot Test - Test , Red"
# returns ("comando", "Test - Test , Red")
#
# "yao ming"
# returns ("yao ming", "")
def extraer_comando(text):
    if not text:
        return ("", "")
    text = text.strip()
    # Skip the leading "/" so the returned command matches the examples above.
    inicio = 1 if text.startswith("/") else 0
    comando = ""
    for i in range(inicio, len(text)):
        if text[0] == "/" and text[i] == ' ':
            return (comando, text[i + 1:])
        # Only treat "@MemesBot" as a separator, not any "@" character.
        if text[i] == '@' and text[i + 1:i + 1 + len("MemesBot")] == "MemesBot":
            return (comando, text[i + 1 + len("MemesBot"):].lstrip(' '))
        comando += text[i]
    return (comando, "")
|
import traceback
import unittest
from unittest.mock import Mock, patch
from tests import resources
from pyfootball.models.team import Team
from pyfootball.models.fixture import Fixture
from pyfootball.models.player import Player
class TestTeam(unittest.TestCase):
def test_init(self):
try:
Team(resources.TEAM)
except:
self.fail()
def test_init_bad_data(self):
with self.assertRaises(KeyError):
Team({"a": "dict", "that": "has", "bad": "data"})
class TestTeamAfterInit(unittest.TestCase):
def setUp(self):
try:
self.team = Team(resources.TEAM)
except Exception:
print("Setting up Team object failed:")
traceback.print_exc()
            self.skipTest("Team object setup failed")
def tearDown(self):
self.team = None
def test_data_types(self):
integers = ['id']
for attr, val in self.team.__dict__.items():
if attr in integers:
self.assertIsInstance(val, int)
else: # Strings
self.assertIsInstance(val, str)
@patch('pyfootball.models.team.requests.get')
def test_get_fixtures(self, mock_get):
mock_response = mock_get.return_value
mock_response.status_code = 200
mock_response.json.return_value = resources.FIXTURES
fixtures = self.team.get_fixtures()
self.assertIsInstance(fixtures, list)
for fixture in fixtures:
self.assertIsInstance(fixture, Fixture)
@patch('pyfootball.models.team.requests.get')
def test_get_players(self, mock_get):
mock_response = mock_get.return_value
mock_response.status_code = 200
mock_response.json.return_value = resources.PLAYERS
players = self.team.get_players()
self.assertIsInstance(players, list)
for player in players:
self.assertIsInstance(player, Player)
if __name__ == '__main__':
unittest.main()
|
def isStringCharsUnique(incoming_string):
    """Return True if every character in the string appears exactly once."""
    for i in range(0, len(incoming_string)):
        # Only compare against later positions; earlier pairs were already checked.
        for j in range(i + 1, len(incoming_string)):
            if incoming_string[i] == incoming_string[j]:
                print('Match Found - Positions: ' + str(i + 1) + ' & ' + str(j + 1))  # "+ 1" to avoid 0 position for readability
                return False
    return True
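# An equivalent O(n) check using a set, shown only as an alternative sketch
# (unlike the function above, it does not report duplicate positions).
def isStringCharsUniqueFast(incoming_string):
    return len(set(incoming_string)) == len(incoming_string)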
incoming_string = str(input())
if isStringCharsUnique(incoming_string):
print('Your string has all unique characters.')
else:
print('Your string does not have all unique characters.')
|
# Add parent folder to path
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import unittest
import numpy as np
from time import perf_counter
from src.Equations.TaitEOS import TaitEOS, TaitEOS_B, TaitEOS_co
class test_numba_taiteos(unittest.TestCase):
def test_vec(self):
rhos = np.linspace(0, 2000, 1_000_000)
        # Reference fluid parameters for the Tait equation of state
rho0 = 1000.0
gamma = 7
co = TaitEOS_co(H=2.0)
B = TaitEOS_B(co=co, rho0=rho0, gamma=gamma)
# Calc vectorized
start_vec = perf_counter()
p = TaitEOS(gamma, B, rho0, rhos)
t_vec = perf_counter() - start_vec
t_old = 0
for i, rho in enumerate(rhos):
# Old
start_old = perf_counter()
p_s = self.old_calc(rho, rho0, gamma, B)
t_old += perf_counter() - start_old
# Verify
self.assertAlmostEqual(p_s, p[i], 3)
print('Completed TaitEOS')
print(f'Timing:')
print(f'Old: {t_old:f} [s]')
print(f'New: {t_vec:f} [s]')
print(f'Speed-up: {t_old / t_vec:f}x')
def old_calc(self, rho: float, rho0: float, gamma: float, B: float) -> float:
ratio = rho / rho0
temp = ratio ** gamma
p = (temp - 1.0) * B
return p
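    # Both the vectorized TaitEOS call and old_calc evaluate the Tait equation
    # of state: p = B * ((rho / rho0) ** gamma - 1).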
if __name__ == "__main__":
test_numba_taiteos().test_vec()
|
# -*- coding: utf8 -*-
import sys
# Create the thank_you function
def thank_you() -> None:
    # Prints "Thank you!" and then prompts with "You're welcome!";
    # the program exits if the user types anything at the prompt.
    print("""
    ありがとう!(∩´∀`)∩
    """)
    q = input("どういたしまして!")
    if q:
        sys.exit()
def main() -> None:
thank_you()
if __name__ == '__main__':
main()
|
# Generated by Django 2.2.16 on 2020-10-21 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('integration', '0004_gaenotpserversettings'),
]
operations = [
migrations.AddField(
model_name='gaenotpserversettings',
name='server_type',
field=models.CharField(choices=[('NEARFORM', 'NearForm OTP Server'),
('APHL', 'APHL Exposure Notifications')],
default='NEARFORM', max_length=255),
),
]
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_result_groups import GroupsResultIqProtocolEntity
from ..structs import Group
class ListGroupsResultIqProtocolEntity(GroupsResultIqProtocolEntity):
'''
<iq type="result" from="g.us" id="{{IQ_ID}}">
<group s_t="{{SUBJECT_TIME}}" creation="{{CREATING_TIME}}" owner="{{OWNER_JID}}" id="{{GROUP_ID}}" s_o="{{SUBJECT_OWNER_JID}}" subject="{{SUBJECT}}">
</group>
<group s_t="{{SUBJECT_TIME}}" creation="{{CREATING_TIME}}" owner="{{OWNER_JID}}" id="{{GROUP_ID}}" s_o="{{SUBJECT_OWNER_JID}}" subject="{{SUBJECT}}">
</group>
</iq>
'''
def __init__(self, groupsList):
super(ListGroupsResultIqProtocolEntity, self).__init__(_from = "g.us")
self.setProps(groupsList)
def __str__(self):
out = super(ListGroupsResultIqProtocolEntity, self).__str__()
for g in self.groupsList:
out += "%s" % g
out += "\n"
return out
def setProps(self, groupsList):
assert type(groupsList) is list and len(groupsList) > 0 and groupsList[0].__class__ is Group,\
"groupList must be a list of Group instances"
self.groupsList = groupsList
def toProtocolTreeNode(self):
node = super(ListGroupsResultIqProtocolEntity, self).toProtocolTreeNode()
groupsNodes = [
ProtocolTreeNode("group", {
"id": group.getId(),
"owner": group.getOwner(),
"subject": group.getSubject(),
"s_o": group.getSubjectOwner(),
"s_t": str(group.getSubjectTime()),
"creation": str(group.getCreationTime())
})
for group in self.groupsList
]
node.addChildren(groupsNodes)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = GroupsResultIqProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = ListGroupsResultIqProtocolEntity
groups = [
Group(groupNode["id"], groupNode["owner"], groupNode["subject"], groupNode["s_o"], groupNode["s_t"], groupNode["creation"])
for groupNode in node.getAllChildren()
]
entity.setProps(groups)
return entity
|
"""
Utils used by hkp module.
"""
import subprocess
import sys
import os
__all__ = ['cached_property']
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_missing = _Missing()
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
Taken from Werkzeug: http://werkzeug.pocoo.org/
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
# overhead. If one choses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
class ca(object):
def __init__(
self,
domain='sks-keyservers.net',
pem_url="https://sks-keyservers.net/sks-keyservers.netCA.pem",
pem_filename='sks-keyservers.netCA.pem'
):
self.domain = domain
self.pem_url = pem_url
self.pem_filename = pem_filename
@cached_property
def pem(self):
if sys.platform == "win32":
gpgconfcmd = ["gpgconf.exe", "--list-dirs", "datadir"]
else:
gpgconfcmd = ["/usr/bin/env", "gpgconf", "--list-dirs", "datadir"]
try:
output = subprocess.check_output(gpgconfcmd)
if sys.version_info[0] == 2:
datadir = output.strip()
else:
datadir = output.decode(sys.stdout.encoding).strip()
        except (OSError, subprocess.CalledProcessError):
            # gpgconf is unavailable or failed; fall back to the remote PEM below.
            datadir = ""
pempath = "{0}{1}{2}".format(datadir, os.sep, self.pem_filename)
if os.path.exists(pempath):
pemfile = pempath
else:
pemfile = self.pem_url
return pemfile
def __repr__(self):
return 'CA {0}, PEM {1}'.format(self.domain, self.pem)
def __str__(self):
return repr(self)
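# Illustrative usage: the first access to ``pem`` runs the gpgconf lookup via
# cached_property; later accesses return the value cached on the instance.
if __name__ == '__main__':
    authority = ca()
    print(authority.pem)  # computed once
    print(authority.pem)  # served from the instance's __dict__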
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import io
import os
import pickle
import select
import stat
import subprocess
import sys
from contextlib import contextmanager
from shutil import copyfile
from typing import List
from attr import dataclass
from jinja2 import Environment, FileSystemLoader
from pympler import summary
from frontend import frontend_utils
@dataclass
class RetrievedObjects:
pid: int
title: str
data: List[List[int]]
class GDBObject:
def __init__(self, pid, current_path, executable):
self.pid = pid
self.fifo = f"/tmp/memanz_pipe_{self.pid}"
self.current_path = current_path
self.executable = executable
def run_analysis(self, debug=False):
self.create_pipe()
frontend_utils.echo_info(f"Analyzing pid {self.pid}")
command_file = f"{self.current_path}/gdb_commands.py"
command = [
"gdb",
"-q",
# Activates python for GDB.
self.executable,
"-p",
f"{self.pid}",
"-ex",
"set trace-commands on",
f"{'-batch' if debug else '-batch-silent'}",
"-ex",
# Lets gdb find the correct gdb_commands script.
f"set directories {self.current_path}",
"-ex",
# Sets the correct path for gdb_commands, else C-API commands fail.
f'py sys.path.append("{self.current_path}")',
"-x",
f"{command_file}",
]
frontend_utils.echo_info(f"Setting up GDB for pid {self.pid}")
proc = subprocess.Popen(
command, stderr=sys.stderr if debug else subprocess.DEVNULL
)
with self.drain_pipe(proc) as data:
retrieved_objs = RetrievedObjects(
pid=self.pid,
title=f"Analysis for {self.pid}",
data=self.unpickle_pipe(data),
)
self._end_subprocess(proc)
return retrieved_objs
@contextmanager
def drain_pipe(self, process):
"""
We need this because by default, `open`s on named pipes block. If GDB or
the injected GDB extension in Python crash, the process will never write
to the pipe and we will block opening and `memory_analyzer` won't exit.
"""
try:
pipe = os.open(self.fifo, os.O_RDONLY | os.O_NONBLOCK)
result = io.BytesIO()
timeout = 0.1 # seconds
partial_read = None
while bool(partial_read) or process.poll() is None:
ready_fds, _, _ = select.select([pipe], [], [], timeout)
if len(ready_fds) > 0:
ready_fd = ready_fds[0]
try:
partial_read = os.read(ready_fd, select.PIPE_BUF)
except BlockingIOError:
partial_read = None
if partial_read:
result.write(partial_read)
result.seek(0)
yield result
result.close()
except Exception as e:
frontend_utils.echo_error(f"Failed with {e}")
self._end_subprocess(process)
sys.exit(1)
finally:
os.close(pipe)
    def _end_subprocess(self, proc):
        try:
            proc.wait(5)
        except subprocess.TimeoutExpired:
            # Popen.wait raises subprocess.TimeoutExpired, not TimeoutError.
            proc.kill()
def create_pipe(self):
try:
os.mkfifo(self.fifo)
os.chmod(str(self.fifo), 0o666)
except OSError as oe:
if oe.errno != errno.EEXIST:
raise
def unpickle_pipe(self, fifo):
frontend_utils.echo_info("Gathering data...")
try:
items = pickle.load(fifo)
if items:
if isinstance(items, Exception):
raise items
return items
except EOFError:
return
except pickle.UnpicklingError as e:
frontend_utils.echo_error(f"Error retrieving data from process: {e}")
raise
except Exception as e:
frontend_utils.echo_error(
f"{type(e).__name__} occurred during analysis: {e}"
)
raise
def load_template(name, templates_path):
env = Environment(autoescape=False, loader=FileSystemLoader(templates_path))
return env.get_template(name)
def render_template(
template_name, templates_path, num_refs, pid, specific_refs, output_file
):
objgraph_template = load_template(template_name, templates_path)
template = objgraph_template.render(
num_refs=num_refs, pid=pid, specific_refs=specific_refs, output_file=output_file
)
with open(f"{templates_path}rendered_template-{pid}.py.out", "w") as f:
f.write(template)
return template
def snapshot_diff(cur_items, snapshot_file):
"""
Attempts to compare like PIDs. If like PIDS can't be found it will just compare
the first PID listed to the first PID in the file. Any unmatched or non-first
PIDs will be ignored because we don't know what to compare them to.
"""
try:
prev_items = list(frontend_utils.get_pages(snapshot_file))
except pickle.UnpicklingError as e:
frontend_utils.echo_error(
f"Error unpickling the data from {snapshot_file}: {e}"
)
return None
differences = []
for cur_item in cur_items:
for prev_item in prev_items:
if cur_item.pid == prev_item.pid:
diff = summary.get_diff(cur_item.data, prev_item.data)
differences.append(
RetrievedObjects(
pid=cur_item.pid,
title=f"Snapshot Differences for {cur_item.pid}",
data=diff,
)
)
if not differences:
diff = summary.get_diff(cur_items[0].data, prev_items[0].data)
differences.append(
RetrievedObjects(pid=0, title=f"Snapshot Differences", data=diff)
)
return differences
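# Illustrative usage (pid and executable are placeholder values):
#
#     gdb_obj = GDBObject(pid=1234,
#                         current_path=os.path.dirname(os.path.abspath(__file__)),
#                         executable="/usr/bin/python3")
#     retrieved = gdb_obj.run_analysis(debug=False)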
|
import matplotlib.pyplot as plt
import numpy as np
def record_losses(input):
    # Collect loss values from a log file. The value is assumed to start
    # 8 characters after the word "losses" (i.e. after a separator such as
    # "losses: ") and to run to the end of the line.
    re_list = []
    with open(input, encoding='utf-8') as f:
        for line in f:
            li_str = str(line)
            id = li_str.find("losses")
            if id != -1:
                re_list.append(float(li_str[id + 8:-1]))
    print(re_list)
    return re_list
if __name__=="__main__":
losses_list1=record_losses('__main__.info_pcl+I2G.log')
print(len(losses_list1))
losses_list2=record_losses('__main__.info_pcl10.log')
print(len(losses_list2))
x=np.arange(1,101,1)
plt.plot(x,losses_list1,label='pcl+I2G')
plt.plot(x,losses_list2,label='pcl')
plt.legend(['pcl+I2G','pcl'])
plt.xlabel("Epoch")
plt.ylabel("Losses")
plt.show()
|
import requests
import random
import json
import hashlib
import hmac
import urllib
import uuid
import time
import copy
import math
import sys
from datetime import datetime
import calendar
import os
from requests_toolbelt import MultipartEncoder  # required by uploadPhoto/uploadVideo below
# Turn off InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
try:
    from moviepy.editor import VideoFileClip
except ImportError:
    print("Thank you for using this bot\n")
# The urllib library was split into other modules from Python 2 to Python 3
if sys.version_info.major == 3:
import urllib.parse
try:
    from ImageUtils import getImageSize
except ImportError:
    # Issue 159, python3 import fix
    from .ImageUtils import getImageSize
class InstagramAPI:
API_URL = 'https://i.instagram.com/api/v1/'
DEVICE_SETTINTS = {'manufacturer': 'Xiaomi',
'model': 'HM 1SW',
'android_version': 18,
'android_release': '4.3'}
USER_AGENT = 'Instagram 10.26.0 Android ({android_version}/{android_release}; 320dpi; 720x1280; {manufacturer}; {model}; armani; qcom; en_US)'.format(**DEVICE_SETTINTS)
IG_SIG_KEY = '4f8732eb9ba7d1c8e8897a75d6474d4eb3f5279137431b2aafb71fafe2abe178'
EXPERIMENTS = 'ig_promote_reach_objective_fix_universe,ig_android_universe_video_production,ig_search_client_h1_2017_holdout,ig_android_live_follow_from_comments_universe,ig_android_carousel_non_square_creation,ig_android_live_analytics,ig_android_follow_all_dialog_confirmation_copy,ig_android_stories_server_coverframe,ig_android_video_captions_universe,ig_android_offline_location_feed,ig_android_direct_inbox_retry_seen_state,ig_android_ontact_invite_universe,ig_android_live_broadcast_blacklist,ig_android_insta_video_reconnect_viewers,ig_android_ad_async_ads_universe,ig_android_search_clear_layout_universe,ig_android_shopping_reporting,ig_android_stories_surface_universe,ig_android_verified_comments_universe,ig_android_preload_media_ahead_in_current_reel,android_instagram_prefetch_suggestions_universe,ig_android_reel_viewer_fetch_missing_reels_universe,ig_android_direct_search_share_sheet_universe,ig_android_business_promote_tooltip,ig_android_direct_blue_tab,ig_android_async_network_tweak_universe,ig_android_elevate_main_thread_priority_universe,ig_android_stories_gallery_nux,ig_android_instavideo_remove_nux_comments,ig_video_copyright_whitelist,ig_react_native_inline_insights_with_relay,ig_android_direct_thread_message_animation,ig_android_draw_rainbow_client_universe,ig_android_direct_link_style,ig_android_live_heart_enhancements_universe,ig_android_rtc_reshare,ig_android_preload_item_count_in_reel_viewer_buffer,ig_android_users_bootstrap_service,ig_android_auto_retry_post_mode,ig_android_shopping,ig_android_main_feed_seen_state_dont_send_info_on_tail_load,ig_fbns_preload_default,ig_android_gesture_dismiss_reel_viewer,ig_android_tool_tip,ig_android_ad_logger_funnel_logging_universe,ig_android_gallery_grid_column_count_universe,ig_android_business_new_ads_payment_universe,ig_android_direct_links,ig_android_audience_control,ig_android_live_encore_consumption_settings_universe,ig_perf_android_holdout,ig_android_cache_contact_import_list,ig_android_links_receivers,ig_android_ad_impression_backtest,ig_android_list_redesign,ig_android_stories_separate_overlay_creation,ig_android_stop_video_recording_fix_universe,ig_android_render_video_segmentation,ig_android_live_encore_reel_chaining_universe,ig_android_sync_on_background_enhanced_10_25,ig_android_immersive_viewer,ig_android_mqtt_skywalker,ig_fbns_push,ig_android_ad_watchmore_overlay_universe,ig_android_react_native_universe,ig_android_profile_tabs_redesign_universe,ig_android_live_consumption_abr,ig_android_story_viewer_social_context,ig_android_hide_post_in_feed,ig_android_video_loopcount_int,ig_android_enable_main_feed_reel_tray_preloading,ig_android_camera_upsell_dialog,ig_android_ad_watchbrowse_universe,ig_android_internal_research_settings,ig_android_search_people_tag_universe,ig_android_react_native_ota,ig_android_enable_concurrent_request,ig_android_react_native_stories_grid_view,ig_android_business_stories_inline_insights,ig_android_log_mediacodec_info,ig_android_direct_expiring_media_loading_errors,ig_video_use_sve_universe,ig_android_cold_start_feed_request,ig_android_enable_zero_rating,ig_android_reverse_audio,ig_android_branded_content_three_line_ui_universe,ig_android_live_encore_production_universe,ig_stories_music_sticker,ig_android_stories_teach_gallery_location,ig_android_http_stack_experiment_2017,ig_android_stories_device_tilt,ig_android_pending_request_search_bar,ig_android_fb_topsearch_sgp_fork_request,ig_android_seen_state_with_view_info,ig_android_animation_perf_reporter_timeout,ig_android_new_block_flow,ig_android_sto
ry_tray_title_play_all_v2,ig_android_direct_address_links,ig_android_stories_archive_universe,ig_android_save_collections_cover_photo,ig_android_live_webrtc_livewith_production,ig_android_sign_video_url,ig_android_stories_video_prefetch_kb,ig_android_stories_create_flow_favorites_tooltip,ig_android_live_stop_broadcast_on_404,ig_android_live_viewer_invite_universe,ig_android_promotion_feedback_channel,ig_android_render_iframe_interval,ig_android_accessibility_logging_universe,ig_android_camera_shortcut_universe,ig_android_use_one_cookie_store_per_user_override,ig_profile_holdout_2017_universe,ig_android_stories_server_brushes,ig_android_ad_media_url_logging_universe,ig_android_shopping_tag_nux_text_universe,ig_android_comments_single_reply_universe,ig_android_stories_video_loading_spinner_improvements,ig_android_collections_cache,ig_android_comment_api_spam_universe,ig_android_facebook_twitter_profile_photos,ig_android_shopping_tag_creation_universe,ig_story_camera_reverse_video_experiment,ig_android_direct_bump_selected_recipients,ig_android_ad_cta_haptic_feedback_universe,ig_android_vertical_share_sheet_experiment,ig_android_family_bridge_share,ig_android_search,ig_android_insta_video_consumption_titles,ig_android_stories_gallery_preview_button,ig_android_fb_auth_education,ig_android_camera_universe,ig_android_me_only_universe,ig_android_instavideo_audio_only_mode,ig_android_user_profile_chaining_icon,ig_android_live_video_reactions_consumption_universe,ig_android_stories_hashtag_text,ig_android_post_live_badge_universe,ig_android_swipe_fragment_container,ig_android_search_users_universe,ig_android_live_save_to_camera_roll_universe,ig_creation_growth_holdout,ig_android_sticker_region_tracking,ig_android_unified_inbox,ig_android_live_new_watch_time,ig_android_offline_main_feed_10_11,ig_import_biz_contact_to_page,ig_android_live_encore_consumption_universe,ig_android_experimental_filters,ig_android_search_client_matching_2,ig_android_react_native_inline_insights_v2,ig_android_business_conversion_value_prop_v2,ig_android_redirect_to_low_latency_universe,ig_android_ad_show_new_awr_universe,ig_family_bridges_holdout_universe,ig_android_background_explore_fetch,ig_android_following_follower_social_context,ig_android_video_keep_screen_on,ig_android_ad_leadgen_relay_modern,ig_android_profile_photo_as_media,ig_android_insta_video_consumption_infra,ig_android_ad_watchlead_universe,ig_android_direct_prefetch_direct_story_json,ig_android_shopping_react_native,ig_android_top_live_profile_pics_universe,ig_android_direct_phone_number_links,ig_android_stories_weblink_creation,ig_android_direct_search_new_thread_universe,ig_android_histogram_reporter,ig_android_direct_on_profile_universe,ig_android_network_cancellation,ig_android_background_reel_fetch,ig_android_react_native_insights,ig_android_insta_video_audio_encoder,ig_android_family_bridge_bookmarks,ig_android_data_usage_network_layer,ig_android_universal_instagram_deep_links,ig_android_dash_for_vod_universe,ig_android_modular_tab_discover_people_redesign,ig_android_mas_sticker_upsell_dialog_universe,ig_android_ad_add_per_event_counter_to_logging_event,ig_android_sticky_header_top_chrome_optimization,ig_android_rtl,ig_android_biz_conversion_page_pre_select,ig_android_promote_from_profile_button,ig_android_live_broadcaster_invite_universe,ig_android_share_spinner,ig_android_text_action,ig_android_own_reel_title_universe,ig_promotions_unit_in_insights_landing_page,ig_android_business_settings_header_univ,ig_android_save_longpress_tooltip,ig_android_cons
train_image_size_universe,ig_android_business_new_graphql_endpoint_universe,ig_ranking_following,ig_android_stories_profile_camera_entry_point,ig_android_universe_reel_video_production,ig_android_power_metrics,ig_android_sfplt,ig_android_offline_hashtag_feed,ig_android_live_skin_smooth,ig_android_direct_inbox_search,ig_android_stories_posting_offline_ui,ig_android_sidecar_video_upload_universe,ig_android_promotion_manager_entry_point_universe,ig_android_direct_reply_audience_upgrade,ig_android_swipe_navigation_x_angle_universe,ig_android_offline_mode_holdout,ig_android_live_send_user_location,ig_android_direct_fetch_before_push_notif,ig_android_non_square_first,ig_android_insta_video_drawing,ig_android_swipeablefilters_universe,ig_android_live_notification_control_universe,ig_android_analytics_logger_running_background_universe,ig_android_save_all,ig_android_reel_viewer_data_buffer_size,ig_direct_quality_holdout_universe,ig_android_family_bridge_discover,ig_android_react_native_restart_after_error_universe,ig_android_startup_manager,ig_story_tray_peek_content_universe,ig_android_profile,ig_android_high_res_upload_2,ig_android_http_service_same_thread,ig_android_scroll_to_dismiss_keyboard,ig_android_remove_followers_universe,ig_android_skip_video_render,ig_android_story_timestamps,ig_android_live_viewer_comment_prompt_universe,ig_profile_holdout_universe,ig_android_react_native_insights_grid_view,ig_stories_selfie_sticker,ig_android_stories_reply_composer_redesign,ig_android_streamline_page_creation,ig_explore_netego,ig_android_ig4b_connect_fb_button_universe,ig_android_feed_util_rect_optimization,ig_android_rendering_controls,ig_android_os_version_blocking,ig_android_encoder_width_safe_multiple_16,ig_search_new_bootstrap_holdout_universe,ig_android_snippets_profile_nux,ig_android_e2e_optimization_universe,ig_android_comments_logging_universe,ig_shopping_insights,ig_android_save_collections,ig_android_live_see_fewer_videos_like_this_universe,ig_android_show_new_contact_import_dialog,ig_android_live_view_profile_from_comments_universe,ig_fbns_blocked,ig_formats_and_feedbacks_holdout_universe,ig_android_reduce_view_pager_buffer,ig_android_instavideo_periodic_notif,ig_search_user_auto_complete_cache_sync_ttl,ig_android_marauder_update_frequency,ig_android_suggest_password_reset_on_oneclick_login,ig_android_promotion_entry_from_ads_manager_universe,ig_android_live_special_codec_size_list,ig_android_enable_share_to_messenger,ig_android_background_main_feed_fetch,ig_android_live_video_reactions_creation_universe,ig_android_channels_home,ig_android_sidecar_gallery_universe,ig_android_upload_reliability_universe,ig_migrate_mediav2_universe,ig_android_insta_video_broadcaster_infra_perf,ig_android_business_conversion_social_context,android_ig_fbns_kill_switch,ig_android_live_webrtc_livewith_consumption,ig_android_destroy_swipe_fragment,ig_android_react_native_universe_kill_switch,ig_android_stories_book_universe,ig_android_all_videoplayback_persisting_sound,ig_android_draw_eraser_universe,ig_direct_search_new_bootstrap_holdout_universe,ig_android_cache_layer_bytes_threshold,ig_android_search_hash_tag_and_username_universe,ig_android_business_promotion,ig_android_direct_search_recipients_controller_universe,ig_android_ad_show_full_name_universe,ig_android_anrwatchdog,ig_android_qp_kill_switch,ig_android_2fac,ig_direct_bypass_group_size_limit_universe,ig_android_promote_simplified_flow,ig_android_share_to_whatsapp,ig_android_hide_bottom_nav_bar_on_discover_people,ig_fbns_dump_ids,ig_android_hands_free_b
efore_reverse,ig_android_skywalker_live_event_start_end,ig_android_live_join_comment_ui_change,ig_android_direct_search_story_recipients_universe,ig_android_direct_full_size_gallery_upload,ig_android_ad_browser_gesture_control,ig_channel_server_experiments,ig_android_video_cover_frame_from_original_as_fallback,ig_android_ad_watchinstall_universe,ig_android_ad_viewability_logging_universe,ig_android_new_optic,ig_android_direct_visual_replies,ig_android_stories_search_reel_mentions_universe,ig_android_threaded_comments_universe,ig_android_mark_reel_seen_on_Swipe_forward,ig_internal_ui_for_lazy_loaded_modules_experiment,ig_fbns_shared,ig_android_capture_slowmo_mode,ig_android_live_viewers_list_search_bar,ig_android_video_single_surface,ig_android_offline_reel_feed,ig_android_video_download_logging,ig_android_last_edits,ig_android_exoplayer_4142,ig_android_post_live_viewer_count_privacy_universe,ig_android_activity_feed_click_state,ig_android_snippets_haptic_feedback,ig_android_gl_drawing_marks_after_undo_backing,ig_android_mark_seen_state_on_viewed_impression,ig_android_live_backgrounded_reminder_universe,ig_android_live_hide_viewer_nux_universe,ig_android_live_monotonic_pts,ig_android_search_top_search_surface_universe,ig_android_user_detail_endpoint,ig_android_location_media_count_exp_ig,ig_android_comment_tweaks_universe,ig_android_ad_watchmore_entry_point_universe,ig_android_top_live_notification_universe,ig_android_add_to_last_post,ig_save_insights,ig_android_live_enhanced_end_screen_universe,ig_android_ad_add_counter_to_logging_event,ig_android_blue_token_conversion_universe,ig_android_exoplayer_settings,ig_android_progressive_jpeg,ig_android_offline_story_stickers,ig_android_gqls_typing_indicator,ig_android_chaining_button_tooltip,ig_android_video_prefetch_for_connectivity_type,ig_android_use_exo_cache_for_progressive,ig_android_samsung_app_badging,ig_android_ad_holdout_watchandmore_universe,ig_android_offline_commenting,ig_direct_stories_recipient_picker_button,ig_insights_feedback_channel_universe,ig_android_insta_video_abr_resize,ig_android_insta_video_sound_always_on'''
SIG_KEY_VERSION = '4'
# username # Instagram username
# password # Instagram password
# debug # Debug
# uuid # UUID
# device_id # Device ID
# username_id # Username ID
# token # _csrftoken
# isLoggedIn # Session status
# rank_token # Rank token
# IGDataPath # Data storage path
def __init__(self, username, password, debug=False, IGDataPath=None):
m = hashlib.md5()
m.update(username.encode('utf-8') + password.encode('utf-8'))
self.device_id = self.generateDeviceId(m.hexdigest())
self.setUser(username, password)
self.isLoggedIn = False
self.LastResponse = None
self.s = requests.Session()
def setUser(self, username, password):
self.username = username
self.password = password
self.uuid = self.generateUUID(True)
def setProxy(self, proxy=None):
"""
Set proxy for all requests::
Proxy format - user:password@ip:port
"""
if proxy is not None:
print('Set proxy!')
proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
self.s.proxies.update(proxies)
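        # Illustrative call (host, port and credentials below are placeholders):
        #     api.setProxy('user:pass@127.0.0.1:8080')
        # After this, every request made through self.s, for both http and https
        # URLs, is routed via http://user:pass@127.0.0.1:8080.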
def login(self, force=False):
if (not self.isLoggedIn or force):
if (self.SendRequest('si/fetch_headers/?challenge_type=signup&guid=' + self.generateUUID(False), None, True)):
data = {'phone_id': self.generateUUID(True),
'_csrftoken': self.LastResponse.cookies['csrftoken'],
'username': self.username,
'guid': self.uuid,
'device_id': self.device_id,
'password': self.password,
'login_attempt_count': '0'}
if (self.SendRequest('accounts/login/', self.generateSignature(json.dumps(data)), True)):
self.isLoggedIn = True
self.username_id = self.LastJson["logged_in_user"]["pk"]
self.rank_token = "%s_%s" % (self.username_id, self.uuid)
self.token = self.LastResponse.cookies["csrftoken"]
self.syncFeatures()
self.autoCompleteUserList()
self.timelineFeed()
self.getv2Inbox()
self.getRecentActivity()
print("Login success!\n")
return True
def syncFeatures(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'id': self.username_id,
'_csrftoken': self.token,
'experiments': self.EXPERIMENTS})
return self.SendRequest('qe/sync/', self.generateSignature(data))
def autoCompleteUserList(self):
return self.SendRequest('friendships/autocomplete_user_list/')
def timelineFeed(self):
return self.SendRequest('feed/timeline/')
def megaphoneLog(self):
return self.SendRequest('megaphone/log/')
def expose(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'id': self.username_id,
'_csrftoken': self.token,
'experiment': 'ig_android_profile_contextual_feed'})
return self.SendRequest('qe/expose/', self.generateSignature(data))
    def logout(self):
        return self.SendRequest('accounts/logout/')
def uploadPhoto(self, photo, caption=None, upload_id=None, is_sidecar=None):
if upload_id is None:
upload_id = str(int(time.time() * 1000))
data = {'upload_id': upload_id,
'_uuid': self.uuid,
'_csrftoken': self.token,
'image_compression': '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}',
'photo': ('pending_media_%s.jpg' % upload_id, open(photo, 'rb'), 'application/octet-stream', {'Content-Transfer-Encoding': 'binary'})}
if is_sidecar:
data['is_sidecar'] = '1'
m = MultipartEncoder(data, boundary=self.uuid)
self.s.headers.update({'X-IG-Capabilities': '3Q4=',
'X-IG-Connection-Type': 'WIFI',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'Content-type': m.content_type,
'Connection': 'close',
'User-Agent': self.USER_AGENT})
response = self.s.post(self.API_URL + "upload/photo/", data=m.to_string())
if response.status_code == 200:
            if self.configure(upload_id, photo, caption):
                self.expose()
                return True
        return False
def uploadVideo(self, video, thumbnail, caption=None, upload_id=None, is_sidecar=None):
if upload_id is None:
upload_id = str(int(time.time() * 1000))
data = {'upload_id': upload_id,
'_csrftoken': self.token,
'media_type': '2',
'_uuid': self.uuid}
if is_sidecar:
data['is_sidecar'] = '1'
m = MultipartEncoder(data, boundary=self.uuid)
self.s.headers.update({'X-IG-Capabilities': '3Q4=',
'X-IG-Connection-Type': 'WIFI',
'Host': 'i.instagram.com',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'Content-type': m.content_type,
'Connection': 'keep-alive',
'User-Agent': self.USER_AGENT})
response = self.s.post(self.API_URL + "upload/video/", data=m.to_string())
if response.status_code == 200:
body = json.loads(response.text)
upload_url = body['video_upload_urls'][3]['url']
upload_job = body['video_upload_urls'][3]['job']
            with open(video, 'rb') as video_file:
                videoData = video_file.read()
# solve issue #85 TypeError: slice indices must be integers or None or have an __index__ method
request_size = int(math.floor(len(videoData) / 4))
lastRequestExtra = (len(videoData) - (request_size * 3))
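            # Worked example (illustrative): for a 10-byte payload, request_size = 2 and
            # lastRequestExtra = 4, so the four chunks below cover bytes 0-1, 2-3, 4-5
            # and 6-9, i.e. the last chunk absorbs the remainder of the integer division.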
headers = copy.deepcopy(self.s.headers)
self.s.headers.update({'X-IG-Capabilities': '3Q4=',
'X-IG-Connection-Type': 'WIFI',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'Content-type': 'application/octet-stream',
'Session-ID': upload_id,
'Connection': 'keep-alive',
'Content-Disposition': 'attachment; filename="video.mov"',
'job': upload_job,
'Host': 'upload.instagram.com',
'User-Agent': self.USER_AGENT})
for i in range(0, 4):
start = i * request_size
if i == 3:
end = i * request_size + lastRequestExtra
else:
end = (i + 1) * request_size
length = lastRequestExtra if i == 3 else request_size
content_range = "bytes {start}-{end}/{lenVideo}".format(start=start, end=(end - 1),
lenVideo=len(videoData)).encode('utf-8')
self.s.headers.update({'Content-Length': str(end - start), 'Content-Range': content_range, })
response = self.s.post(upload_url, data=videoData[start:start + length])
self.s.headers = headers
if response.status_code == 200:
            if self.configureVideo(upload_id, video, thumbnail, caption):
                self.expose()
                return True
        return False
def uploadAlbum(self, media, caption=None, upload_id=None):
if not media:
raise Exception("List of media to upload can't be empty.")
if len(media) < 2 or len(media) > 10:
raise Exception('Instagram requires that albums contain 2-10 items. You tried to submit {}.'.format(len(media)))
# Figure out the media file details for ALL media in the album.
# NOTE: We do this first, since it validates whether the media files are
# valid and lets us avoid wasting time uploading totally invalid albums!
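        # Shape of the `media` argument this method expects (paths and ids are illustrative):
        #     media = [
        #         {'type': 'photo', 'file': '/path/one.jpg',
        #          'usertags': [{'position': [0.5, 0.5], 'user_id': '123'}]},
        #         {'type': 'video', 'file': '/path/two.mp4', 'thumbnail': '/path/two.jpg'},
        #     ]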
for idx, item in enumerate(media):
            if not item.get('file', '') or not item.get('type', ''):
raise Exception('Media at index "{}" does not have the required "file" and "type" keys.'.format(idx))
# $itemInternalMetadata = new InternalMetadata();
# If usertags are provided, verify that the entries are valid.
if item.get('usertags', []):
self.throwIfInvalidUsertags(item['usertags'])
# Pre-process media details and throw if not allowed on Instagram.
if item.get('type', '') == 'photo':
# Determine the photo details.
# $itemInternalMetadata->setPhotoDetails(Constants::FEED_TIMELINE_ALBUM, $item['file']);
pass
elif item.get('type', '') == 'video':
# Determine the video details.
# $itemInternalMetadata->setVideoDetails(Constants::FEED_TIMELINE_ALBUM, $item['file']);
pass
else:
raise Exception('Unsupported album media type "{}".'.format(item['type']))
itemInternalMetadata = {}
item['internalMetadata'] = itemInternalMetadata
# Perform all media file uploads.
for idx, item in enumerate(media):
itemInternalMetadata = item['internalMetadata']
item_upload_id = self.generateUploadId()
if item.get('type', '') == 'photo':
self.uploadPhoto(item['file'], caption=caption, is_sidecar=True, upload_id=item_upload_id)
# $itemInternalMetadata->setPhotoUploadResponse($this->ig->internal->uploadPhotoData(Constants::FEED_TIMELINE_ALBUM, $itemInternalMetadata));
elif item.get('type', '') == 'video':
# Attempt to upload the video data.
self.uploadVideo(item['file'], item['thumbnail'], caption=caption, is_sidecar=True, upload_id=item_upload_id)
# $itemInternalMetadata = $this->ig->internal->uploadVideo(Constants::FEED_TIMELINE_ALBUM, $item['file'], $itemInternalMetadata);
# Attempt to upload the thumbnail, associated with our video's ID.
# $itemInternalMetadata->setPhotoUploadResponse($this->ig->internal->uploadPhotoData(Constants::FEED_TIMELINE_ALBUM, $itemInternalMetadata));
pass
item['internalMetadata']['upload_id'] = item_upload_id
albumInternalMetadata = {}
return self.configureTimelineAlbum(media, albumInternalMetadata, captionText=caption)
def throwIfInvalidUsertags(self, usertags):
for user_position in usertags:
            # Verify this usertag entry, ensuring that it has the format
            # {'position': [0.0, 1.0], 'user_id': '123'} and nothing else.
correct = True
if isinstance(user_position, dict):
position = user_position.get('position', None)
user_id = user_position.get('user_id', None)
if isinstance(position, list) and len(position) == 2:
try:
x = float(position[0])
y = float(position[1])
if x < 0.0 or x > 1.0:
correct = False
if y < 0.0 or y > 1.0:
correct = False
except:
correct = False
try:
                    user_id = int(user_id)  # int() replaces Python 2's long()
if user_id < 0:
correct = False
except:
correct = False
if not correct:
raise Exception('Invalid user entry in usertags array.')
def configureTimelineAlbum(self, media, albumInternalMetadata, captionText='', location=None):
endpoint = 'media/configure_sidecar/'
albumUploadId = self.generateUploadId()
date = datetime.utcnow().isoformat()
childrenMetadata = []
for item in media:
itemInternalMetadata = item['internalMetadata']
uploadId = itemInternalMetadata.get('upload_id', self.generateUploadId())
if item.get('type', '') == 'photo':
# Build this item's configuration.
photoConfig = {'date_time_original': date,
'scene_type': 1,
'disable_comments': False,
'upload_id': uploadId,
'source_type': 0,
'scene_capture_type': 'standard',
'date_time_digitized': date,
'geotag_enabled': False,
'camera_position': 'back',
'edits': {'filter_strength': 1,
'filter_name': 'IGNormalFilter'}
}
# This usertag per-file EXTERNAL metadata is only supported for PHOTOS!
if item.get('usertags', []):
# NOTE: These usertags were validated in Timeline::uploadAlbum.
photoConfig['usertags'] = json.dumps({'in': item['usertags']})
childrenMetadata.append(photoConfig)
if item.get('type', '') == 'video':
# Get all of the INTERNAL per-VIDEO metadata.
videoDetails = itemInternalMetadata.get('video_details', {})
# Build this item's configuration.
videoConfig = {'length': videoDetails.get('duration', 1.0),
'date_time_original': date,
'scene_type': 1,
'poster_frame_index': 0,
'trim_type': 0,
'disable_comments': False,
'upload_id': uploadId,
'source_type': 'library',
'geotag_enabled': False,
'edits': {
'length': videoDetails.get('duration', 1.0),
'cinema': 'unsupported',
'original_length': videoDetails.get('duration', 1.0),
'source_type': 'library',
'start_time': 0,
'camera_position': 'unknown',
'trim_type': 0}
}
childrenMetadata.append(videoConfig)
# Build the request...
data = {'_csrftoken': self.token,
'_uid': self.username_id,
'_uuid': self.uuid,
'client_sidecar_id': albumUploadId,
'caption': captionText,
'children_metadata': childrenMetadata}
self.SendRequest(endpoint, self.generateSignature(json.dumps(data)))
response = self.LastResponse
if response.status_code == 200:
self.LastResponse = response
self.LastJson = json.loads(response.text)
return True
else:
print("Request return " + str(response.status_code) + " error!")
# for debugging
try:
self.LastResponse = response
self.LastJson = json.loads(response.text)
except:
pass
return False
def sendMessage(self, target_user, msgText):
target_user = '[[{}]]'.format(','.join([target_user]))
url = 'direct_v2/threads/broadcast/text/'
data = {
'text': msgText,
'_uuid': self.uuid,
'_csrftoken': self.token,
'recipient_users': target_user,
'_uid': self.username_id,
'action': 'send_item',
'client_context': self.generateUUID(True)}
return self.SendRequest(url, data)
def direct_share(self, media_id, recipients, text=None):
        if not isinstance(recipients, list):
recipients = [str(recipients)]
recipient_users = '"",""'.join(str(r) for r in recipients)
endpoint = 'direct_v2/threads/broadcast/media_share/?media_type=photo'
boundary = self.uuid
bodies = [
{
'type': 'form-data',
'name': 'media_id',
'data': media_id,
},
{
'type': 'form-data',
'name': 'recipient_users',
'data': '[["{}"]]'.format(recipient_users),
},
{
'type': 'form-data',
'name': 'client_context',
'data': self.uuid,
},
{
'type': 'form-data',
'name': 'thread',
'data': '["0"]',
},
{
'type': 'form-data',
'name': 'text',
'data': text or '',
},
]
data = self.buildBody(bodies, boundary)
self.s.headers.update({'User-Agent': self.USER_AGENT,
'Proxy-Connection': 'keep-alive',
'Connection': 'keep-alive',
'Accept': '*/*',
'Content-Type': 'multipart/form-data; boundary={}'.format(boundary),
'Accept-Language': 'en-en'})
# self.SendRequest(endpoint,post=data) #overwrites 'Content-type' header and boundary is missed
response = self.s.post(self.API_URL + endpoint, data=data)
if response.status_code == 200:
self.LastResponse = response
self.LastJson = json.loads(response.text)
return True
else:
print("Request return " + str(response.status_code) + " error!")
# for debugging
try:
self.LastResponse = response
self.LastJson = json.loads(response.text)
except:
pass
return False
def configureVideo(self, upload_id, video, thumbnail, caption=''):
clip = VideoFileClip(video)
self.uploadPhoto(photo=thumbnail, caption=caption, upload_id=upload_id)
data = json.dumps({
'upload_id': upload_id,
'source_type': 3,
'poster_frame_index': 0,
'length': 0.00,
'audio_muted': False,
'filter_type': 0,
'video_result': 'deprecated',
'clips': {
'length': clip.duration,
'source_type': '3',
'camera_position': 'back',
},
'extra': {
'source_width': clip.size[0],
'source_height': clip.size[1],
},
'device': self.DEVICE_SETTINTS,
'_csrftoken': self.token,
'_uuid': self.uuid,
'_uid': self.username_id,
'caption': caption,
})
return self.SendRequest('media/configure/?video=1', self.generateSignature(data))
def configure(self, upload_id, photo, caption=''):
(w, h) = getImageSize(photo)
data = json.dumps({'_csrftoken': self.token,
'media_folder': 'Instagram',
'source_type': 4,
'_uid': self.username_id,
'_uuid': self.uuid,
'caption': caption,
'upload_id': upload_id,
'device': self.DEVICE_SETTINTS,
'edits': {
'crop_original_size': [w * 1.0, h * 1.0],
'crop_center': [0.0, 0.0],
'crop_zoom': 1.0
},
'extra': {
'source_width': w,
'source_height': h
}})
return self.SendRequest('media/configure/?', self.generateSignature(data))
def editMedia(self, mediaId, captionText=''):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'caption_text': captionText})
return self.SendRequest('media/' + str(mediaId) + '/edit_media/', self.generateSignature(data))
def removeSelftag(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('media/' + str(mediaId) + '/remove/', self.generateSignature(data))
def mediaInfo(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'media_id': mediaId})
return self.SendRequest('media/' + str(mediaId) + '/info/', self.generateSignature(data))
def deleteMedia(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'media_id': mediaId})
return self.SendRequest('media/' + str(mediaId) + '/delete/', self.generateSignature(data))
def changePassword(self, newPassword):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'old_password': self.password,
'new_password1': newPassword,
'new_password2': newPassword})
return self.SendRequest('accounts/change_password/', self.generateSignature(data))
def explore(self):
return self.SendRequest('discover/explore/')
def comment(self, mediaId, commentText):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'comment_text': commentText})
return self.SendRequest('media/' + str(mediaId) + '/comment/', self.generateSignature(data))
def deleteComment(self, mediaId, commentId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('media/' + str(mediaId) + '/comment/' + str(commentId) + '/delete/', self.generateSignature(data))
def changeProfilePicture(self, photo):
# TODO Instagram.php 705-775
return False
def removeProfilePicture(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('accounts/remove_profile_picture/', self.generateSignature(data))
def setPrivateAccount(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('accounts/set_private/', self.generateSignature(data))
def setPublicAccount(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('accounts/set_public/', self.generateSignature(data))
def getProfileData(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('accounts/current_user/?edit=true', self.generateSignature(data))
def editProfile(self, url, phone, first_name, biography, email, gender):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'external_url': url,
'phone_number': phone,
'username': self.username,
'full_name': first_name,
'biography': biography,
'email': email,
'gender': gender})
return self.SendRequest('accounts/edit_profile/', self.generateSignature(data))
def getUsernameInfo(self, usernameId):
return self.SendRequest('users/' + str(usernameId) + '/info/')
def getSelfUsernameInfo(self):
return self.getUsernameInfo(self.username_id)
def getSelfSavedMedia(self):
return self.SendRequest('feed/saved')
def getRecentActivity(self):
activity = self.SendRequest('news/inbox/?')
return activity
def getFollowingRecentActivity(self):
activity = self.SendRequest('news/?')
return activity
def getv2Inbox(self):
inbox = self.SendRequest('direct_v2/inbox/?')
return inbox
def getv2Threads(self, thread, cursor=None):
endpoint = 'direct_v2/threads/{0}'.format(thread)
if cursor is not None:
endpoint += '?cursor={0}'.format(cursor)
inbox = self.SendRequest(endpoint)
return inbox
def getUserTags(self, usernameId):
tags = self.SendRequest('usertags/' + str(usernameId) + '/feed/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
return tags
def getSelfUserTags(self):
return self.getUserTags(self.username_id)
def tagFeed(self, tag):
userFeed = self.SendRequest('feed/tag/' + str(tag) + '/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
return userFeed
def getMediaLikers(self, mediaId):
likers = self.SendRequest('media/' + str(mediaId) + '/likers/?')
return likers
def getGeoMedia(self, usernameId):
locations = self.SendRequest('maps/user/' + str(usernameId) + '/')
return locations
def getSelfGeoMedia(self):
return self.getGeoMedia(self.username_id)
def fbUserSearch(self, query):
query = self.SendRequest('fbsearch/topsearch/?context=blended&query=' + str(query) + '&rank_token=' + str(self.rank_token))
return query
def searchUsers(self, query):
query = self.SendRequest('users/search/?ig_sig_key_version=' + str(self.SIG_KEY_VERSION) + '&is_typeahead=true&query=' + str(query) + '&rank_token=' + str(self.rank_token))
return query
def searchUsername(self, usernameName):
query = self.SendRequest('users/' + str(usernameName) + '/usernameinfo/')
return query
def syncFromAdressBook(self, contacts):
return self.SendRequest('address_book/link/?include=extra_display_name,thumbnails', "contacts=" + json.dumps(contacts))
def searchTags(self, query):
query = self.SendRequest('tags/search/?is_typeahead=true&q=' + str(query) + '&rank_token=' + str(self.rank_token))
return query
def getTimeline(self):
query = self.SendRequest('feed/timeline/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
return query
def getUserFeed(self, usernameId, maxid='', minTimestamp=None):
query = self.SendRequest('feed/user/%s/?max_id=%s&min_timestamp=%s&rank_token=%s&ranked_content=true'
% (usernameId, maxid, minTimestamp, self.rank_token))
return query
def getSelfUserFeed(self, maxid='', minTimestamp=None):
return self.getUserFeed(self.username_id, maxid, minTimestamp)
def getHashtagFeed(self, hashtagString, maxid=''):
return self.SendRequest('feed/tag/' + hashtagString + '/?max_id=' + str(maxid) + '&rank_token=' + self.rank_token + '&ranked_content=true&')
def searchLocation(self, query):
locationFeed = self.SendRequest('fbsearch/places/?rank_token=' + str(self.rank_token) + '&query=' + str(query))
return locationFeed
def getLocationFeed(self, locationId, maxid=''):
return self.SendRequest('feed/location/' + str(locationId) + '/?max_id=' + maxid + '&rank_token=' + self.rank_token + '&ranked_content=true&')
def getPopularFeed(self):
popularFeed = self.SendRequest('feed/popular/?people_teaser_supported=1&rank_token=' + str(self.rank_token) + '&ranked_content=true&')
return popularFeed
def getUserFollowings(self, usernameId, maxid=''):
url = 'friendships/' + str(usernameId) + '/following/?'
query_string = {'ig_sig_key_version': self.SIG_KEY_VERSION,
'rank_token': self.rank_token}
if maxid:
query_string['max_id'] = maxid
if sys.version_info.major == 3:
url += urllib.parse.urlencode(query_string)
else:
url += urllib.urlencode(query_string)
return self.SendRequest(url)
def getSelfUsersFollowing(self):
return self.getUserFollowings(self.username_id)
def getUserFollowers(self, usernameId, maxid=''):
if maxid == '':
return self.SendRequest('friendships/' + str(usernameId) + '/followers/?rank_token=' + self.rank_token)
else:
return self.SendRequest('friendships/' + str(usernameId) + '/followers/?rank_token=' + self.rank_token + '&max_id=' + str(maxid))
def getSelfUserFollowers(self):
return self.getUserFollowers(self.username_id)
def like(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'media_id': mediaId})
return self.SendRequest('media/' + str(mediaId) + '/like/', self.generateSignature(data))
def unlike(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'media_id': mediaId})
return self.SendRequest('media/' + str(mediaId) + '/unlike/', self.generateSignature(data))
def getMediaComments(self, mediaId, max_id=''):
return self.SendRequest('media/' + mediaId + '/comments/?max_id=' + max_id)
def setNameAndPhone(self, name='', phone=''):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'first_name': name,
'phone_number': phone,
'_csrftoken': self.token})
return self.SendRequest('accounts/set_phone_and_name/', self.generateSignature(data))
def getDirectShare(self):
return self.SendRequest('direct_share/inbox/?')
def backup(self):
# TODO Instagram.php 1470-1485
return False
def follow(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/create/' + str(userId) + '/', self.generateSignature(data))
def unfollow(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/destroy/' + str(userId) + '/', self.generateSignature(data))
def block(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/block/' + str(userId) + '/', self.generateSignature(data))
def unblock(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/unblock/' + str(userId) + '/', self.generateSignature(data))
def userFriendship(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/show/' + str(userId) + '/', self.generateSignature(data))
def getLikedMedia(self, maxid=''):
return self.SendRequest('feed/liked/?max_id=' + str(maxid))
def generateSignature(self, data, skip_quote=False):
if not skip_quote:
try:
parsedData = urllib.parse.quote(data)
except AttributeError:
parsedData = urllib.quote(data)
else:
parsedData = data
return 'ig_sig_key_version=' + self.SIG_KEY_VERSION + '&signed_body=' + hmac.new(self.IG_SIG_KEY.encode('utf-8'), data.encode('utf-8'), hashlib.sha256).hexdigest() + '.' + parsedData
def generateDeviceId(self, seed):
volatile_seed = "12345"
m = hashlib.md5()
m.update(seed.encode('utf-8') + volatile_seed.encode('utf-8'))
return 'android-' + m.hexdigest()[:16]
def generateUUID(self, type):
generated_uuid = str(uuid.uuid4())
if (type):
return generated_uuid
else:
return generated_uuid.replace('-', '')
def generateUploadId(self):
return str(calendar.timegm(datetime.utcnow().utctimetuple()))
def buildBody(self, bodies, boundary):
body = u''
for b in bodies:
body += u'--{boundary}\r\n'.format(boundary=boundary)
body += u'Content-Disposition: {b_type}; name="{b_name}"'.format(b_type=b['type'], b_name=b['name'])
_filename = b.get('filename', None)
_headers = b.get('headers', None)
if _filename:
                _filename, ext = os.path.splitext(_filename)
                # splitext() keeps the leading dot in ext, so it is appended directly
                body += u'; filename="pending_media_{uid}{ext}"'.format(uid=self.generateUploadId(), ext=ext)
if _headers and isinstance(_headers, list):
for h in _headers:
                    body += u'\r\n{header}'.format(header=h)
body += u'\r\n\r\n{data}\r\n'.format(data=b['data'])
body += u'--{boundary}--'.format(boundary=boundary)
return body
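    # For a single form-data entry, buildBody() produces a part of the form
    # (boundary abbreviated, values illustrative):
    #     --<uuid>\r\n
    #     Content-Disposition: form-data; name="media_id"\r\n
    #     \r\n
    #     <data>\r\n
    #     --<uuid>--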
def SendRequest(self, endpoint, post=None, login=False):
        verify = False  # disable TLS certificate verification for these requests
if (not self.isLoggedIn and not login):
raise Exception("Not logged in!\n")
self.s.headers.update({'Connection': 'close',
'Accept': '*/*',
'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'User-Agent': self.USER_AGENT})
while True:
try:
if (post is not None):
response = self.s.post(self.API_URL + endpoint, data=post, verify=verify)
else:
response = self.s.get(self.API_URL + endpoint, verify=verify)
break
except Exception as e:
print('Except on SendRequest (wait 60 sec and resend): ' + str(e))
time.sleep(60)
if response.status_code == 200:
self.LastResponse = response
self.LastJson = json.loads(response.text)
return True
else:
print("Request return " + str(response.status_code) + " error!")
# for debugging
try:
self.LastResponse = response
self.LastJson = json.loads(response.text)
print(self.LastJson)
except:
pass
return False
def getTotalFollowers(self, usernameId):
followers = []
next_max_id = ''
        while True:
self.getUserFollowers(usernameId, next_max_id)
temp = self.LastJson
for item in temp["users"]:
followers.append(item)
if temp["big_list"] is False:
return followers
next_max_id = temp["next_max_id"]
def getTotalFollowings(self, usernameId):
followers = []
next_max_id = ''
while True:
self.getUserFollowings(usernameId, next_max_id)
temp = self.LastJson
for item in temp["users"]:
followers.append(item)
if temp["big_list"] is False:
return followers
next_max_id = temp["next_max_id"]
def getTotalUserFeed(self, usernameId, minTimestamp=None):
user_feed = []
next_max_id = ''
while True:
self.getUserFeed(usernameId, next_max_id, minTimestamp)
temp = self.LastJson
for item in temp["items"]:
user_feed.append(item)
if temp["more_available"] is False:
return user_feed
next_max_id = temp["next_max_id"]
def getTotalSelfUserFeed(self, minTimestamp=None):
return self.getTotalUserFeed(self.username_id, minTimestamp)
def getTotalSelfFollowers(self):
return self.getTotalFollowers(self.username_id)
def getTotalSelfFollowings(self):
return self.getTotalFollowings(self.username_id)
def getTotalLikedMedia(self, scan_rate=1):
next_id = ''
liked_items = []
for x in range(0, scan_rate):
temp = self.getLikedMedia(next_id)
temp = self.LastJson
try:
next_id = temp["next_max_id"]
for item in temp["items"]:
liked_items.append(item)
except KeyError as e:
break
return liked_items
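# Minimal usage sketch for the wrapper above. It assumes the enclosing class is named
# InstagramAPI, as in the upstream project this file appears to be based on, and that
# the credentials below are placeholders supplied by the caller.
if __name__ == '__main__':
    api = InstagramAPI('your_username', 'your_password')
    if api.login():
        api.tagFeed('python')   # fetch the feed for #python
        print(api.LastJson)     # every successful request leaves its JSON on LastJson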
|
# Hello World program in Python
print("Example of Python Tuples\n")
# Create a Tuple:
thistuple = ("apple", "banana", "cherry")
print(thistuple)
# You can access tuple items by referring to the index number:
# Return the item in position 1:
print(thistuple[1])
# You can loop through the tuple items by using a for loop.
# Iterate through the items and print the values:
for x in thistuple:
print(x)
# To determine if a specified item is present in a tuple, use the in keyword:
# Check if "apple" is present in the tuple:
thistuple = ("apple", "banana", "cherry")
if "apple" in thistuple:
print("Yes, 'apple' is in the fruits tuple")
# To determine how many items a tuple has, use the len() function:
# Print the number of items in the tuple:
print(len(thistuple))
# Using the tuple() constructor to make a tuple:
thistuple = tuple(("apple", "banana", "cherry")) # note the double round-brackets
print(thistuple)
# Once a tuple is created, you cannot add items to it. Tuples are unchangeable.
# You cannot add items to a tuple:
thistuple = ("apple", "banana", "cherry")
thistuple[3] = "orange" # This will raise an error
print(thistuple)
# The del keyword can delete the tuple completely:
del thistuple
print(thistuple) # this will raise an error because the tuple no longer exists
|