content stringlengths 5 1.05M |
|---|
# Dump the Python source of the stdlib ``os`` module to stdout.
# (Bug fix: the original final line carried a stray " |" separator that
# made the script a syntax error.)
import os
import inspect

# inspect.getsource reads the pure-Python file backing the module object.
src = inspect.getsource(os)
print(src)
# Package metadata for the hetzner-control CLI (surfaced e.g. by --version).
__app_name__ = "hetzner-control"
__version__ = "0.3.0"
|
class Solution:
    def numSquares(self, n: int) -> int:
        """Return the least number of perfect squares summing to ``n``.

        Bottom-up DP: dp[m] = 1 + min(dp[m - s]) over perfect squares
        s <= m.  By Lagrange's four-square theorem the answer is <= 4.

        Bug fix: the original enumerated squares only up to 100**2, which
        silently produced wrong answers for n > 10000; squares are now
        enumerated up to isqrt(n).
        """
        square_nums = [x * x for x in range(1, int(n ** 0.5) + 2) if x * x <= n]
        dp = [0] * (n + 1)
        for num in range(1, n + 1):
            best = num  # worst case: num ones (1*1 each); actual answer <= 4
            for square in square_nums:
                if square > num:
                    break
                best = min(best, dp[num - square] + 1)
            dp[num] = best
        return dp[n]
# Smoke checks: expected outputs are 3, 2, 3, 4, 1, 1, 3, 4.
for _n in (12, 13, 14, 15, 16, 100, 198, 9999):
    print(Solution().numSquares(_n))
|
"""
Scheme obtained by gluing two other schemes
"""
#*******************************************************************************
# Copyright (C) 2006 William Stein
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*******************************************************************************
import morphism
import scheme
class GluedScheme(scheme.Scheme):
    r"""
    The scheme obtained by gluing two schemes along a common open subscheme.

    INPUT:

    - ``f`` -- open immersion from a scheme `U` to a scheme `X`

    - ``g`` -- open immersion from `U` to a scheme `Y`

    OUTPUT: the scheme obtained by gluing `X` and `Y` along the open set `U`.

    .. note::

        Checking that `f` and `g` are open immersions is not implemented.
    """
    def __init__(self, f, g, check=True):
        # Validate the gluing data (same checks for both immersions).
        if check:
            for label, mor in (("f", f), ("g", g)):
                if not morphism.is_SchemeMorphism(mor):
                    raise TypeError("%s (=%s) must be a scheme morphism" % (label, mor))
            if f.domain() != g.domain():
                raise ValueError("f (=%s) and g (=%s) must have the same domain" % (f, g))
        self.__f = f
        self.__g = g

    def gluing_maps(self):
        """Return the pair ``(f, g)`` of open immersions used for the gluing."""
        return self.__f, self.__g

    def _repr_(self):
        """String representation naming the two glued pieces and the overlap."""
        parts = (self.__f.codomain(), self.__g.codomain(), self.__f.domain())
        return "Scheme obtained by gluing X and Y along U, where\n X: %s\n Y: %s\n U: %s" % parts
|
# Generated by Django 3.0.7 on 2020-06-12 19:58
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.7) field tweaks for the ``comment`` app:
    makes ``liked``, ``parentComment`` and ``recipient`` optional and pins
    their on-delete behaviour."""
    dependencies = [
        ('authorization', '0005_user_banned'),
        ('comment', '0004_auto_20200529_2044'),
    ]
    operations = [
        # 'liked' may now be empty (blank=True).
        migrations.AlterField(
            model_name='comment',
            name='liked',
            field=models.ManyToManyField(blank=True, related_name='liked', to='authorization.User'),
        ),
        # Replies are removed together with their parent comment (CASCADE).
        migrations.AlterField(
            model_name='comment',
            name='parentComment',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
                                    to='comment.Comment'),
        ),
        # NOTE(review): SET('[törölt]') stores a literal string when the
        # recipient user is deleted -- unusual for a FK to User; confirm a
        # matching sentinel user/value exists.
        migrations.AlterField(
            model_name='comment',
            name='recipient',
            field=models.ForeignKey(blank=True, null=True, on_delete=models.SET('[törölt]'), related_name='recipient',
                                    to='authorization.User'),
        ),
    ]
|
import picamera
import picamera.array
import png
import math
from pixel_object import PixelObject
"""
Image processor that can find the edges in a PNG image captured by a PiCamera.
"""
class ImageProcessor:
    """Edge-detection pipeline for frames captured from a Raspberry Pi camera.

    A captured YUV frame is flipped, saved as ``raw.png``, run through
    horizontal and vertical gradient passes, fused, thresholded to
    black/white, dilated by one pixel, and finally grouped into connected
    ``PixelObject`` blobs.  Intermediate stages are written to ``img/`` as
    ``processed_*.png``.

    NOTE(review): this module is Python 2 only -- ``neighbors`` uses a
    tuple parameter, and the recurring ``rows.append(range(96))`` pattern
    relies on ``range`` returning a mutable list.
    """
    def __init__(self, res_width=96, res_height=96):
        # Camera is mounted upside down, hence both flips below.
        self.camera = picamera.PiCamera(resolution=(res_width, res_height))
        # TODO propagate configurable resolution through '96' logic below
        self.camera.hflip = True
        self.camera.vflip = True
        self.res_width = res_width
        self.res_height = res_height
        self.stream = picamera.array.PiYUVArray(self.camera)
        # Blob bookkeeping; reset at the start of every capture_frame().
        self.pixelObjList = []
        self.object_id_center = 0  # monotonically increasing PixelObject id source
        self.pixelObjList.append(PixelObject(self.next_obj_id()))
        self.max_pixel_count = 0
        self.largest_object_id = 0
        self.largest_X = 0
        self.largest_Y = 0
        self.filename = ''
    def close(self):
        """Reset all blob state and release the camera device."""
        print('[ImageProcessor.close] flushing')
        self.pixelObjList = []
        self.object_id_center = 0
        self.max_pixel_count = 0
        self.largest_object_id = 0
        self.largest_X = 0
        self.largest_Y = 0
        self.camera.close()
    def next_obj_id(self):
        """Return a fresh, never-reused PixelObject id."""
        self.object_id_center += 1
        return self.object_id_center
    def capture_frame(self):
        """Capture one YUV frame and push it through the whole pipeline."""
        self.stream = picamera.array.PiYUVArray(self.camera)
        self.camera.capture(self.stream, 'yuv')
        self.camera._set_led(True)
        # Start a fresh blob list for this frame.
        self.pixelObjList = []
        self.object_id_center = 0
        self.pixelObjList.append(PixelObject(self.next_obj_id()))
        rows = []
        for _ in range(self.res_height):
            rows.append(range(self.res_width))
        # flip image horizontally
        # NOTE(review): the outer loop runs over res_width and the inner
        # over res_height while indexing rows[j][i] -- only safe while the
        # resolution is square (96x96); confirm before changing resolution.
        for j, j_ in enumerate(range(self.res_width-1, -1, -1)):
            # now flip vertically
            for i, i_ in enumerate(range(self.res_height-1, -1, -1)):
                # Channel 0 of the YUV triple is luminance.
                rows[j][i] = self.stream.array[j_][i_][0]
        self.filename = self.save_PNG('raw.png', rows)
        # Pipeline: gradients -> fuse -> threshold -> dilate (-> blobs).
        self.spread_white_pixels(
            self.make_black_and_white(
                self.fuse_horizontal_and_vertical(
                    self.get_horizontal_edges(rows),
                    self.get_vertical_edges(rows)))
        )
    def get_horizontal_edges(self, raw_rows):
        """Return per-pixel luminance difference with the right neighbour
        (left neighbour on the last column); saves processed_1.png."""
        # get horizontal edges
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if i + 1 <= 95:
                    rows[j][i] = self.difference(raw_rows[j][i],
                                                 raw_rows[j][i + 1])
                else:
                    # Last column: fall back to the left neighbour.
                    rows[j][i] = self.difference(raw_rows[j][i],
                                                 raw_rows[j][i - 1])
        self.save_PNG('processed_1.png', rows)
        return rows
    def get_vertical_edges(self, rawrows):
        """Return per-pixel luminance difference with the pixel below
        (above on the last row); saves processed_2.png."""
        # get vertical edges
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if j + 1 <= 95:
                    rows[j][i] = self.difference(
                        rawrows[j][i], rawrows[j + 1][i])
                else:
                    # Last row: fall back to the row above.
                    rows[j][i] = self.difference(
                        rawrows[j][i], rawrows[j - 1][i])
        self.save_PNG('processed_2.png', rows)
        return rows
    def fuse_horizontal_and_vertical(self, hrows, vrows):
        """Combine the two gradient images via fusion() (Euclidean
        magnitude, capped at 255); saves processed_3.png."""
        # fuse the horizontal edge-image with the vertical edge-image
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                rows[j][i] = self.fusion(hrows[j][i], vrows[j][i])
        self.save_PNG('processed_3.png', rows)
        return rows
    def make_black_and_white(self, edge_rows):
        """Threshold the fused gradient image to pure black/white;
        saves processed_4.png."""
        # make the image dual in color (black and white)
        threshold = 18
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if edge_rows[j][i] >= threshold:
                    rows[j][i] = 255
                else:
                    rows[j][i] = 0
        self.save_PNG('processed_4.png', rows)
        return rows
    def spread_white_pixels(self, bw_rows):
        """Dilate white pixels by one pixel in every direction, then hand
        the result to blob identification; saves processed_4_5.png."""
        # make all the white pixels spread out one more pixel
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if bw_rows[j][i] == 255:
                    # Paint the whole clipped 3x3 neighbourhood white.
                    tmp_list = self.neighbors((i, j), 96, 96)
                    for ent in tmp_list:
                        tmp_x, tmp_y = ent
                        rows[tmp_y][tmp_x] = 255
                else:
                    rows[j][i] = 0
        self.save_PNG('processed_4_5.png', rows)
        self.identify_pixel_objects(rows)
    def identify_pixel_objects(self, bw_rows):
        """Group white pixels into PixelObjects via 8-neighbour
        connectivity, then merge overlapping objects."""
        # make PixelObjects when pixels are direct 8-neighbours of each other
        for j in range(96):
            for i in range(96):
                if bw_rows[j][i] == 255:  # if the pixel is white
                    # Collect this pixel's white neighbours (itself included).
                    tmp_list = []
                    for ent in self.neighbors((i, j), 96, 96):
                        tmp_x, tmp_y = ent
                        if bw_rows[tmp_y][tmp_x] == 255:  # if pixel is white
                            tmp_list.append(ent)
                    # print tmp_list
                    flag = False
                    for obj in self.pixelObjList:
                        # make a new PixelObj whenever a Pixel isn't connected
                        # to an object
                        if obj.check_xy_set(tmp_list) is True:
                            flag = True
                    if flag is False:
                        self.pixelObjList.append(
                            PixelObject(self.next_obj_id()))
                        for obj in self.pixelObjList:
                            obj.check_xy_set(tmp_list)
        # NOTE(review): this render loop only feeds the commented-out save
        # below -- dead work unless the debug save is re-enabled.
        for obj in self.pixelObjList:
            rows = []
            for _ in range(96):
                rows.append(range(96))
            for j in range(96):
                for i in range(96):
                    if (i, j) in obj.XYset:
                        rows[j][i] = 255
                    else:
                        rows[j][i] = 0
            # self.save_PNG(string.join([str(obj.id_), 'processed_5.png'], ''), rows)
        self.merge_overlapping_pixel_objects()
    def merge_overlapping_pixel_objects(self):
        """Union objects whose pixel sets overlap, then record the largest
        object's id, pixel count and mean coordinates."""
        # merge objects with overlapping x-y tuples together
        center = 0
        max_entry = len(self.pixelObjList) - 1
        # old_len = len(self.pixelObjList)
        # flag = False
        while center < max_entry:
            tmp = self.check_overlap(center)
            if tmp is False:
                # No overlap for this object; advance to the next one.
                center += 1
            else:
                # Fold object ``tmp`` into ``center`` and drop it.
                for ent in self.pixelObjList[tmp].XYset:
                    self.pixelObjList[center].XYset.add(ent)
                del self.pixelObjList[tmp]
                max_entry = len(self.pixelObjList) - 1
        for obj in self.pixelObjList:
            object_pixels = obj.count_pixel()
            # if self.max_pixel_count == 0:
            # results['max_pixel_count'] = object_pixels
            # results['object_id'] = obj.id_
            if object_pixels > self.max_pixel_count:
                self.max_pixel_count = object_pixels
                self.largest_object_id = obj.id_
                x, y = obj.compute_mean_coord()
                self.largest_X = x
                self.largest_Y = y
            # print obj.XYset
            # NOTE(review): render loop below only feeds the commented-out
            # save -- dead work unless re-enabled.
            rows = []
            for _ in range(96):
                rows.append(range(96))
            for j in range(96):
                for i in range(96):
                    if (i, j) in obj.XYset:
                        rows[j][i] = 255
                    else:
                        rows[j][i] = 0
            # self.save_PNG(string.join([str(obj.id_), 'pixpro.png'], ''), rows)
        # print("nmbr of pre Objects:{0}".format(old_len))
        self.new_one_pixel_png()
    def new_one_pixel_png(self):
        """
        make a new png with 1 pixel per object at their respective center
        :return:
        """
        tmp_pos_list = []
        for obj in self.pixelObjList:
            print("X:{0} Y:{1}".format(obj.coord_x, obj.coord_y))
            tmp_pos_list.append((obj.coord_x, obj.coord_real_y))
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if (i, j) in tmp_pos_list:
                    rows[j][i] = 255
                else:
                    rows[j][i] = 0
        self.save_PNG('PixelObjectPos.png', rows)
    def check_overlap(self, counter):
        """
        check for overlapping x-y tuples in sets in 2 distinct objects
        return the listNumber for the object with overlapping pixels if there
        are overlapping pixels return False if not
        """
        max_entry = len(self.pixelObjList) - 1
        # Compare against every object after ``counter`` only.
        for ent1 in self.pixelObjList[counter].XYset:
            for i in range(counter + 1, max_entry + 1, 1):
                for ent2 in self.pixelObjList[i].XYset:
                    if ent1 == ent2:
                        return i
        return False
    def save_PNG(self, filename, rws):
        """Write ``rws`` as a greyscale PNG under img/ and return its path.

        NOTE(review): the writer is hard-coded to 96x96 regardless of the
        configured resolution (see the TODO in __init__).
        """
        # print("[save_PNG] filename:{0} rws:{1}".format(filename, rws))
        name = 'img/{0}'.format(filename)
        f = open(name, 'wb')
        w = png.Writer(96, 96, greyscale=True)
        w.write(f, rws)
        f.close()
        return name
    def difference(self, a, b):
        """Absolute difference of two pixel values."""
        if a >= b:
            return a - b
        else:
            return b - a
    def fusion(self, a, b):
        """Euclidean magnitude of two gradient components, capped at 255."""
        a_ = int(a)
        b_ = int(b)
        tmp = round(math.sqrt(a_ * a_ + b_ * b_))
        if tmp <= 255:
            return int(tmp)
        else:
            return 255
    def neighbors(self, (x, y), max_x, max_y):
        """Return the 3x3 neighbourhood of (x, y) -- including (x, y)
        itself -- clipped to the [0, max_x) x [0, max_y) bounds.

        NOTE: tuple parameter syntax is Python 2 only.
        """
        n_list = []
        xx, yy = (x, y)
        for y_ in range(-1, 2, 1):
            for x_ in range(-1, 2, 1):
                res_x = xx + x_
                res_y = yy + y_
                if max_x > res_x >= 0 and max_y > res_y >= 0:
                    n_list.append((res_x, res_y))
        return n_list
|
#-------------------------------------------------------------------------------
# elftools: elf/notes.py
#
# ELF notes
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
from ..common.py3compat import bytes2hex, bytes2str
from ..common.utils import struct_parse, roundup
from ..construct import CString
def iter_notes(elffile, offset, size):
    """ Yield all the notes in a section or segment.

        ``offset``/``size`` delimit the note region inside the ELF stream.
        Each yielded note is the parsed Elf_Nhdr container extended with
        n_offset, n_name, n_descdata, n_desc and n_size entries.
    """
    end = offset + size
    while offset < end:
        note = struct_parse(
            elffile.structs.Elf_Nhdr,
            elffile.stream,
            stream_pos=offset)
        note['n_offset'] = offset
        offset += elffile.structs.Elf_Nhdr.sizeof()
        elffile.stream.seek(offset)
        # n_namesz is 4-byte aligned.
        # (roundup's second argument is a bit count: 2 -> multiple of 2**2.)
        disk_namesz = roundup(note['n_namesz'], 2)
        note['n_name'] = bytes2str(
            CString('').parse(elffile.stream.read(disk_namesz)))
        offset += disk_namesz
        desc_data = elffile.stream.read(note['n_descsz'])
        note['n_descdata'] = desc_data
        # Decode the descriptor per note type; unknown types keep raw bytes.
        if note['n_type'] == 'NT_GNU_ABI_TAG':
            note['n_desc'] = struct_parse(elffile.structs.Elf_abi,
                                          elffile.stream,
                                          offset)
        elif note['n_type'] == 'NT_GNU_BUILD_ID':
            note['n_desc'] = bytes2hex(desc_data)
        elif note['n_type'] == 'NT_GNU_GOLD_VERSION':
            note['n_desc'] = bytes2str(desc_data)
        elif note['n_type'] == 'NT_PRPSINFO':
            note['n_desc'] = struct_parse(elffile.structs.Elf_Prpsinfo,
                                          elffile.stream,
                                          offset)
        elif note['n_type'] == 'NT_FILE':
            note['n_desc'] = struct_parse(elffile.structs.Elf_Nt_File,
                                          elffile.stream,
                                          offset)
        elif note['n_type'] == 'NT_GNU_PROPERTY_TYPE_0':
            off = offset
            props = []
            # NOTE(review): the loop bound is the end of the whole note
            # region rather than offset + n_descsz -- fine when the
            # property note is the last one; confirm for multi-note blobs.
            while off < end:
                p = struct_parse(elffile.structs.Elf_Prop, elffile.stream, off)
                # Each property (8-byte header + data) is padded to a
                # multiple of 4 (ELF32) or 8 (ELF64) bytes.
                off += roundup(p.pr_datasz + 8, 2 if elffile.elfclass == 32 else 3)
                props.append(p)
            note['n_desc'] = props
        else:
            note['n_desc'] = desc_data
        # Descriptor is 4-byte aligned on disk as well.
        offset += roundup(note['n_descsz'], 2)
        note['n_size'] = offset - note['n_offset']
        yield note
|
# -*- encoding: utf-8 -*-
import argparse
import getpass
import json
import math
import sys
from tabulate import tabulate
from ..consts import PER_PAGE
from ..libs.config import Config
def user_input(text='', hide_input=False):
    """Read one line from stdin, optionally without echoing it.

    Returns ``None`` when the input cannot be read (EOF, closed stream);
    exits the program cleanly on Ctrl-C.
    """
    reader = getpass.getpass if hide_input else input
    try:
        return reader(text)
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception:
        # Best-effort: treat any read failure as "no answer given".
        return None
def print_header(text=''):
    """Print ``text`` framed above and below by a bar of '#' characters."""
    bar = '#' * (len(text) + 3)
    print(bar)
    print(' %s' % text)
    print(bar)
def print_table(header=[], rows=[]):
    """Render ``rows`` as a grid-style table with ``header`` column names."""
    rendered = tabulate(rows, headers=header, tablefmt='grid')
    print(rendered)
def print_result(format, rows=None, header=None, count=0, page=1, heading=False,
                 zeroResultMsg='No results found for your query.'):
    """Print a result set either as a table or as JSON.

    ``format``       -- 'table' (default when falsy) or anything else for JSON.
    ``rows``/``header`` -- the data and its column names.
    ``count``/``page``  -- pagination info shown in the table header.
    ``heading``      -- optional custom header text for the table output.
    ``zeroResultMsg`` -- message printed when ``count`` is 0 in table mode.

    Bug fix: ``rows``/``header`` previously used mutable list defaults;
    ``None`` sentinels avoid the shared-default pitfall.
    """
    rows = [] if rows is None else rows
    header = [] if header is None else header
    if not format:
        format = 'table'
    if format == 'table':
        if count == 0:
            print(zeroResultMsg)
        else:
            # Equivalent to ceil(count / PER_PAGE) for positive counts.
            total_pages = int(((count - 1)/PER_PAGE) + 1)
            if heading:
                print_header(heading)
            else:
                print_header('Found %s results. On page %s of %s' %
                             (count, page, total_pages))
            print_table(header, rows)
    else:
        # Any non-'table' format falls through to pretty-printed JSON.
        json_result = json.dumps([dict(zip(header, row)) for row in rows],
                                 indent=2, sort_keys=True)
        print(json_result)
def readableMemoryFormat(size):
    """Format ``size`` with a binary-scaled (1024-based) unit suffix.

    NOTE(review): the unit table starts at KB, so the input is presumably
    already expressed in kilobytes -- confirm against the callers.
    """
    if size == 0:
        return '0B'
    units = ("KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size, 1024)))
    scaled = round(size / math.pow(1024, exponent), 2)
    return '%s %s' % (scaled, units[exponent])
class CondAction(argparse.Action):
    """Argparse action that can flip other options to "required".

    The ``to_be_required`` kwarg maps trigger values to lists of actions
    that become required when the trigger value is parsed -- unless the
    config file already supplies a value for them.
    """
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        self.make_required = kwargs.pop('to_be_required', [])
        super(CondAction, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        if values in self.make_required:
            config = Config()
            for action in self.make_required[values]:
                # Only force the flag when the config cannot supply it.
                if not config.get(action.dest):
                    action.required = True
        try:
            setattr(namespace, self.dest, values)
            return super(CondAction, self).__call__(parser, namespace, values,
                                                    option_string)
        except NotImplementedError:
            # Base Action.__call__ is abstract; storing the value above is
            # all that is needed.
            pass
|
"""A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import pathlib
# Resolve sibling files relative to this setup.py so builds work from any CWD.
here = pathlib.Path(__file__).parent
# Get the long description from the README file
# NOTE(review): read_text() uses the locale's default encoding -- consider
# read_text(encoding="utf-8") if the README may contain non-ASCII.
long_description = (here / "README.md").read_text()
setup(
    name="inkblot",
    version="0.0.1",
    description="A simpler static site generator.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/rpalo/inkblot",
    author="Ryan Palo",
    author_email="ryan@thepalos.com",
    classifiers=[
        "Development Status :: 3 - Alpha",
        # # Indicate who your project is intended for
        # 'Intended Audience :: Developers',
        # 'Topic :: Software Development :: Build Tools',
        # Pick your license as you wish
        "License :: OSI Approved :: MIT License",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        # These classifiers are *not* checked by 'pip install'. See instead
        # 'python_requires' below.
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    keywords="static-site-generator web",
    packages=find_packages(exclude=["contrib", "docs", "tests"]),
    # If there are data files included in your packages that need to be
    # installed, specify them here.
    #
    # If using Python 2.6 or earlier, then these have to be included in
    # MANIFEST.in as well.
    # package_data={"sample": ["package_data.dat"]},  # Optional
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    #
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[("my_data", ["data/data_file"])],  # Optional
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # `pip` to create the appropriate form of executable for the target
    # platform.
    #
    # For example, the following would provide a command called `sample` which
    # executes the function `main` from this package when invoked:
    # Installs the `inkblot` console command.
    entry_points={"console_scripts": ["inkblot=inkblot.__main__:main"]},  # Optional
    # project_urls={  # Optional
    # "Bug Reports": "https://github.com/pypa/sampleproject/issues",
    # "Funding": "https://donate.pypi.org",
    # "Say Thanks!": "http://saythanks.io/to/example",
    # "Source": "https://github.com/pypa/sampleproject/",
    # },
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pulp import LpProblem, LpStatus, lpSum, LpVariable, GLPK, LpMinimize
# Tiny weight for the secondary total-link-load term in the MLU LP
# objectives (see optimal_routing_mlu); small enough not to perturb the
# primary congestion-ratio minimization.
OBJ_EPSILON = 1e-12
class Game(object):
    """Traffic-engineering game over a fixed topology and traffic matrices.

    Provides ECMP load simulation and PuLP/GLPK linear programs for optimal
    routing under max-link-utilization (MLU) and delay objectives.

    NOTE(review): ``max_moves``, ``tm_history``, ``tm_indexes`` and
    ``valid_tm_cnt`` are defined only by subclasses (see ``CFRRL_Game``),
    so ``Game`` is effectively abstract.
    """
    def __init__(self, config, env, random_seed=1000):
        self.random_state = np.random.RandomState(seed=random_seed)
        self.data_dir = env.data_dir
        self.DG = env.topology.DG
        self.traffic_file = env.traffic_file
        self.traffic_matrices = env.traffic_matrices
        self.traffic_matrices_dims = self.traffic_matrices.shape
        self.tm_cnt = env.tm_cnt
        self.num_pairs = env.num_pairs
        self.pair_idx_to_sd = env.pair_idx_to_sd
        self.pair_sd_to_idx = env.pair_sd_to_idx
        self.num_nodes = env.num_nodes
        self.num_links = env.num_links
        self.link_idx_to_sd = env.link_idx_to_sd
        self.link_sd_to_idx = env.link_sd_to_idx
        self.link_capacities = env.link_capacities
        self.link_weights = env.link_weights
        self.shortest_paths_node = env.shortest_paths_node  # paths with node info
        self.shortest_paths_link = env.shortest_paths_link  # paths with link info
        self.get_ecmp_next_hops()
        self.model_type = config.model_type
        # for LP: index sets reused by all LP formulations below.
        self.lp_pairs = [p for p in range(self.num_pairs)]
        self.lp_nodes = [n for n in range(self.num_nodes)]
        self.links = [e for e in range(self.num_links)]
        self.lp_links = [e for e in self.link_sd_to_idx]
        self.pair_links = [(pr, e[0], e[1]) for pr in self.lp_pairs for e in self.lp_links]
        self.timeout = config.timeout
        # Per-TM scaling factors filled in by eval_ecmp_traffic_distribution.
        self.load_multiplier = {}

    def generate_inputs(self, normalization=True):
        """Build the (optionally max-normalized) state tensor for every
        valid TM into ``self.normalized_traffic_matrices``
        ([valid_tms, node, node, history], float32)."""
        self.normalized_traffic_matrices = np.zeros(
            (self.valid_tm_cnt, self.traffic_matrices_dims[1], self.traffic_matrices_dims[2], self.tm_history),
            dtype=np.float32)  # tm state [Valid_tms, Node, Node, History]
        idx_offset = self.tm_history - 1
        for tm_idx in self.tm_indexes:
            for h in range(self.tm_history):
                if normalization:
                    # Scale each matrix by its own max so entries fall in [0, 1].
                    tm_max_element = np.max(self.traffic_matrices[tm_idx - h])
                    if tm_max_element > 0:
                        self.normalized_traffic_matrices[tm_idx - idx_offset, :, :, h] = self.traffic_matrices[
                            tm_idx - h] / tm_max_element  # [Valid_tms, Node, Node, History]
                    else:
                        self.normalized_traffic_matrices[tm_idx - idx_offset, :, :, h] = self.traffic_matrices[
                            tm_idx - h]  # [Valid_tms, Node, Node, History]
                else:
                    self.normalized_traffic_matrices[tm_idx - idx_offset, :, :, h] = self.traffic_matrices[
                        tm_idx - h]  # [Valid_tms, Node, Node, History]

    def get_topK_flows(self, tm_idx, pairs):
        """Return the ``self.max_moves`` pair indexes (from ``pairs``) with
        the largest demand in TM ``tm_idx``."""
        tm = self.traffic_matrices[tm_idx]
        f = {}
        for p in pairs:
            s, d = self.pair_idx_to_sd[p]
            f[p] = tm[s][d]
        # Sort by demand descending; pair index breaks ties deterministically.
        sorted_f = sorted(f.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
        cf = []
        for i in range(self.max_moves):
            cf.append(sorted_f[i][0])
        return cf

    def get_ecmp_next_hops(self):
        """Precompute, for every (src, dst), the distinct next hops over
        all equal-cost shortest paths."""
        self.ecmp_next_hops = {}
        for src in range(self.num_nodes):
            for dst in range(self.num_nodes):
                if src == dst:
                    continue
                self.ecmp_next_hops[src, dst] = []
                for p in self.shortest_paths_node[self.pair_sd_to_idx[(src, dst)]]:
                    # p[1] is the node right after src on this shortest path.
                    if p[1] not in self.ecmp_next_hops[src, dst]:
                        self.ecmp_next_hops[src, dst].append(p[1])

    def ecmp_next_hop_distribution(self, link_loads, demand, src, dst):
        """Recursively split ``demand`` evenly across ECMP next hops,
        accumulating per-link load into ``link_loads`` (mutated in place)."""
        if src == dst:
            return
        ecmp_next_hops = self.ecmp_next_hops[src, dst]
        next_hops_cnt = len(ecmp_next_hops)
        # if next_hops_cnt > 1:
        #    print(self.shortest_paths_node[self.pair_sd_to_idx[(src, dst)]])
        ecmp_demand = demand / next_hops_cnt
        # Loop variable renamed from ``np``, which shadowed the numpy alias.
        for next_hop in ecmp_next_hops:
            link_loads[self.link_sd_to_idx[(src, next_hop)]] += ecmp_demand
            self.ecmp_next_hop_distribution(link_loads, ecmp_demand, next_hop, dst)

    def ecmp_traffic_distribution(self, tm_idx):
        """Return the per-link load vector from routing TM ``tm_idx``
        entirely with ECMP."""
        link_loads = np.zeros((self.num_links))
        tm = self.traffic_matrices[tm_idx]
        for pair_idx in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[pair_idx]
            demand = tm[s][d]
            if demand != 0:
                self.ecmp_next_hop_distribution(link_loads, demand, s, d)
        return link_loads

    def get_critical_topK_flows(self, tm_idx, critical_links=5):
        """Return the top-K flows among those whose shortest paths touch
        the ``critical_links`` most utilized links under ECMP."""
        link_loads = self.ecmp_traffic_distribution(tm_idx)
        critical_link_indexes = np.argsort(-(link_loads / self.link_capacities))[:critical_links]
        cf_potential = []
        for pair_idx in range(self.num_pairs):
            for path in self.shortest_paths_link[pair_idx]:
                if len(set(path).intersection(critical_link_indexes)) > 0:
                    cf_potential.append(pair_idx)
                    break
        # print(cf_potential)
        # Bug fix: the message used to format the list itself with %d,
        # which raised TypeError whenever the assert fired; report len().
        assert len(cf_potential) >= self.max_moves, \
            ("cf_potential(%d) < max_move(%d), please increase critical_links(%d)" % (
                len(cf_potential), self.max_moves, critical_links))
        return self.get_topK_flows(tm_idx, cf_potential)

    def eval_ecmp_traffic_distribution(self, tm_idx, eval_delay=False):
        """Evaluate pure-ECMP routing of TM ``tm_idx``.

        Returns (max utilization, delay); delay is 0 unless ``eval_delay``.
        Side effect: caches a per-TM multiplier that rescales the TM to 90%
        peak utilization, used by all delay evaluations.
        """
        eval_link_loads = self.ecmp_traffic_distribution(tm_idx)
        eval_max_utilization = np.max(eval_link_loads / self.link_capacities)
        self.load_multiplier[tm_idx] = 0.9 / eval_max_utilization
        delay = 0
        if eval_delay:
            eval_link_loads *= self.load_multiplier[tm_idx]
            # Queueing-style delay proxy: load / residual capacity, summed.
            delay = sum(eval_link_loads / (self.link_capacities - eval_link_loads))
        return eval_max_utilization, delay

    def optimal_routing_mlu(self, tm_idx):
        """Solve the multi-commodity-flow LP minimizing the congestion
        ratio (MLU) for TM ``tm_idx``.

        Returns (optimal ratio, {(pair, u, v): flow fraction on link (u, v)}).
        """
        tm = self.traffic_matrices[tm_idx]
        demands = {}
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            demands[i] = tm[s][d]
        model = LpProblem(name="routing")
        # ratio[pr, u, v]: fraction of pair pr's demand carried on link (u, v).
        ratio = LpVariable.dicts(name="ratio", indexs=self.pair_links, lowBound=0, upBound=1)
        link_load = LpVariable.dicts(name="link_load", indexs=self.links)
        r = LpVariable(name="congestion_ratio")
        # Flow conservation: net outflow of 1 at each pair's source...
        for pr in self.lp_pairs:
            model += (
                lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][0]]) - lpSum(
                    [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][0]]) == -1,
                "flow_conservation_constr1_%d" % pr)
        # ...net inflow of 1 at each pair's destination...
        for pr in self.lp_pairs:
            model += (
                lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][1]]) - lpSum(
                    [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][1]]) == 1,
                "flow_conservation_constr2_%d" % pr)
        # ...and balance at every intermediate node.
        for pr in self.lp_pairs:
            for n in self.lp_nodes:
                if n not in self.pair_idx_to_sd[pr]:
                    model += (lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == n]) - lpSum(
                        [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == n]) == 0,
                        "flow_conservation_constr3_%d_%d" % (pr, n))
        for e in self.lp_links:
            ei = self.link_sd_to_idx[e]
            model += (link_load[ei] == lpSum([demands[pr] * ratio[pr, e[0], e[1]] for pr in self.lp_pairs]),
                      "link_load_constr%d" % ei)
            model += (link_load[ei] <= self.link_capacities[ei] * r, "congestion_ratio_constr%d" % ei)
        # Objective: congestion ratio plus a tiny total-load tie-breaker.
        model += r + OBJ_EPSILON * lpSum([link_load[e] for e in self.links])
        model.solve(solver=GLPK(msg=False))
        assert LpStatus[model.status] == 'Optimal'
        obj_r = r.value()
        solution = {}
        for k in ratio:
            solution[k] = ratio[k].value()
        return obj_r, solution

    def eval_optimal_routing_mlu(self, tm_idx, solution, eval_delay=False):
        """Apply an LP routing ``solution`` to TM ``tm_idx``.

        Returns (max utilization, delay); delay is 0 unless ``eval_delay``.
        """
        optimal_link_loads = np.zeros((self.num_links))
        eval_tm = self.traffic_matrices[tm_idx]
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            demand = eval_tm[s][d]
            for e in self.lp_links:
                link_idx = self.link_sd_to_idx[e]
                optimal_link_loads[link_idx] += demand * solution[i, e[0], e[1]]
        optimal_max_utilization = np.max(optimal_link_loads / self.link_capacities)
        delay = 0
        if eval_delay:
            # Requires the multiplier cached by eval_ecmp_traffic_distribution.
            assert tm_idx in self.load_multiplier, (tm_idx)
            optimal_link_loads *= self.load_multiplier[tm_idx]
            delay = sum(optimal_link_loads / (self.link_capacities - optimal_link_loads))
        return optimal_max_utilization, delay

    def optimal_routing_mlu_critical_pairs(self, tm_idx, critical_pairs):
        """MLU LP optimizing only ``critical_pairs``; all other pairs stay
        on ECMP and contribute a fixed background load."""
        tm = self.traffic_matrices[tm_idx]
        pairs = critical_pairs
        demands = {}
        background_link_loads = np.zeros(self.num_links)
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            # background link load
            if i not in critical_pairs:
                self.ecmp_next_hop_distribution(background_link_loads, tm[s][d], s, d)
            else:
                demands[i] = tm[s][d]
        model = LpProblem(name="routing")
        pair_links = [(pr, e[0], e[1]) for pr in pairs for e in self.lp_links]
        ratio = LpVariable.dicts(name="ratio", indexs=pair_links, lowBound=0, upBound=1)
        link_load = LpVariable.dicts(name="link_load", indexs=self.links)
        r = LpVariable(name="congestion_ratio")
        # Flow conservation constraints for the re-routed pairs only.
        for pr in pairs:
            model += (
                lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][0]]) - lpSum(
                    [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][0]]) == -1,
                "flow_conservation_constr1_%d" % pr)
        for pr in pairs:
            model += (
                lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][1]]) - lpSum(
                    [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][1]]) == 1,
                "flow_conservation_constr2_%d" % pr)
        for pr in pairs:
            for n in self.lp_nodes:
                if n not in self.pair_idx_to_sd[pr]:
                    model += (lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == n]) - lpSum(
                        [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == n]) == 0,
                        "flow_conservation_constr3_%d_%d" % (pr, n))
        for e in self.lp_links:
            ei = self.link_sd_to_idx[e]
            # Link load = fixed ECMP background + LP-routed critical demand.
            model += (
                link_load[ei] == background_link_loads[ei] + lpSum(
                    [demands[pr] * ratio[pr, e[0], e[1]] for pr in pairs]),
                "link_load_constr%d" % ei)
            model += (link_load[ei] <= self.link_capacities[ei] * r, "congestion_ratio_constr%d" % ei)
        model += r + OBJ_EPSILON * lpSum([link_load[ei] for ei in self.links])
        model.solve(solver=GLPK(msg=False))
        assert LpStatus[model.status] == 'Optimal'
        obj_r = r.value()
        solution = {}
        for k in ratio:
            solution[k] = ratio[k].value()
        return obj_r, solution

    def eval_critical_flow_and_ecmp(self, tm_idx, critical_pairs, solution, eval_delay=False):
        """Evaluate hybrid routing: ``critical_pairs`` follow the LP
        ``solution``, everything else uses ECMP.

        Returns (max utilization, delay); delay is 0 unless ``eval_delay``.
        """
        eval_tm = self.traffic_matrices[tm_idx]
        eval_link_loads = np.zeros((self.num_links))
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            if i not in critical_pairs:
                self.ecmp_next_hop_distribution(eval_link_loads, eval_tm[s][d], s, d)
            else:
                demand = eval_tm[s][d]
                for e in self.lp_links:
                    link_idx = self.link_sd_to_idx[e]
                    eval_link_loads[link_idx] += demand * solution[i, e[0], e[1]]
        eval_max_utilization = np.max(eval_link_loads / self.link_capacities)
        delay = 0
        if eval_delay:
            assert tm_idx in self.load_multiplier, (tm_idx)
            eval_link_loads *= self.load_multiplier[tm_idx]
            delay = sum(eval_link_loads / (self.link_capacities - eval_link_loads))
        return eval_max_utilization, delay

    def optimal_routing_delay(self, tm_idx):
        """Solve the delay-minimizing LP for TM ``tm_idx`` (scaled by the
        cached load multiplier) and return the routing solution dict."""
        assert tm_idx in self.load_multiplier, (tm_idx)
        tm = self.traffic_matrices[tm_idx] * self.load_multiplier[tm_idx]
        demands = {}
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            demands[i] = tm[s][d]
        model = LpProblem(name="routing")
        ratio = LpVariable.dicts(name="ratio", indexs=self.pair_links, lowBound=0, upBound=1)
        link_load = LpVariable.dicts(name="link_load", indexs=self.links)
        f = LpVariable.dicts(name="link_cost", indexs=self.links)
        for pr in self.lp_pairs:
            model += (
                lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][0]]) - lpSum(
                    [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][0]]) == -1,
                "flow_conservation_constr1_%d" % pr)
        for pr in self.lp_pairs:
            model += (
                lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][1]]) - lpSum(
                    [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][1]]) == 1,
                "flow_conservation_constr2_%d" % pr)
        for pr in self.lp_pairs:
            for n in self.lp_nodes:
                if n not in self.pair_idx_to_sd[pr]:
                    model += (lpSum([ratio[pr, e[0], e[1]] for e in self.lp_links if e[1] == n]) - lpSum(
                        [ratio[pr, e[0], e[1]] for e in self.lp_links if e[0] == n]) == 0,
                        "flow_conservation_constr3_%d_%d" % (pr, n))
        for e in self.lp_links:
            ei = self.link_sd_to_idx[e]
            model += (link_load[ei] == lpSum([demands[pr] * ratio[pr, e[0], e[1]] for pr in self.lp_pairs]),
                      "link_load_constr%d" % ei)
            # Piecewise-linear convex link cost in the utilization; f[ei] is
            # bounded below by each linear piece (presumably the classic
            # Fortz-Thorup approximation -- confirm against the paper).
            model += (f[ei] * self.link_capacities[ei] >= link_load[ei], "cost_constr1_%d" % ei)
            model += (f[ei] >= 3 * link_load[ei] / self.link_capacities[ei] - 2 / 3, "cost_constr2_%d" % ei)
            model += (f[ei] >= 10 * link_load[ei] / self.link_capacities[ei] - 16 / 3, "cost_constr3_%d" % ei)
            model += (f[ei] >= 70 * link_load[ei] / self.link_capacities[ei] - 178 / 3, "cost_constr4_%d" % ei)
            model += (f[ei] >= 500 * link_load[ei] / self.link_capacities[ei] - 1468 / 3, "cost_constr5_%d" % ei)
            model += (f[ei] >= 5000 * link_load[ei] / self.link_capacities[ei] - 16318 / 3, "cost_constr6_%d" % ei)
        model += lpSum(f[ei] for ei in self.links)
        model.solve(solver=GLPK(msg=False))
        assert LpStatus[model.status] == 'Optimal'
        solution = {}
        for k in ratio:
            solution[k] = ratio[k].value()
        return solution

    def eval_optimal_routing_delay(self, tm_idx, solution):
        """Return the total delay proxy of ``solution`` on the scaled TM."""
        optimal_link_loads = np.zeros((self.num_links))
        assert tm_idx in self.load_multiplier, (tm_idx)
        eval_tm = self.traffic_matrices[tm_idx] * self.load_multiplier[tm_idx]
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            demand = eval_tm[s][d]
            for e in self.lp_links:
                link_idx = self.link_sd_to_idx[e]
                optimal_link_loads[link_idx] += demand * solution[i, e[0], e[1]]
        optimal_delay = sum(optimal_link_loads / (self.link_capacities - optimal_link_loads))
        return optimal_delay
class CFRRL_Game(Game):
    """RL "game" for CFR-RL-style critical-flow selection.

    The action space is the set of source-destination pairs; each step the
    agent selects up to ``max_moves`` critical pairs whose traffic is
    re-routed, and the reward is derived from the resulting maximum link
    utilization (MLU).
    """
    def __init__(self, config, env, random_seed=1000):
        super(CFRRL_Game, self).__init__(config, env, random_seed)
        self.project_name = config.project_name
        # One action per source-destination pair.
        self.action_dim = env.num_pairs
        # config.max_moves is expressed as a percentage of the pair count.
        self.max_moves = int(self.action_dim * (config.max_moves / 100.))
        assert self.max_moves <= self.action_dim, (self.max_moves, self.action_dim)
        # Only the current traffic matrix is used as the observation.
        self.tm_history = 1
        self.tm_indexes = np.arange(self.tm_history - 1, self.tm_cnt)
        self.valid_tm_cnt = len(self.tm_indexes)
        if config.method == 'pure_policy':
            # Per-TM running baseline as (reward_sum, count) for advantages.
            self.baseline = {}
        self.generate_inputs(normalization=True)
        self.state_dims = self.normalized_traffic_matrices.shape[1:]
        print('Input dims :', self.state_dims)
        print('Max moves :', self.max_moves)
    def get_state(self, tm_idx):
        """Return the normalized traffic matrix observed for ``tm_idx``."""
        idx_offset = self.tm_history - 1
        return self.normalized_traffic_matrices[tm_idx - idx_offset]
    def reward(self, tm_idx, actions):
        """Reward = 1 / MLU achieved by re-routing the selected critical pairs."""
        mlu, _ = self.optimal_routing_mlu_critical_pairs(tm_idx, actions)
        if mlu <= 0:
            # Guard against division by zero. NOTE(review): 10e-6 == 1e-5;
            # possibly 1e-6 was intended — confirm.
            mlu = 10e-6
        reward = 1 / mlu
        return reward
    def advantage(self, tm_idx, reward):
        """Return reward minus the per-TM baseline mean (raw reward if no baseline yet)."""
        if tm_idx not in self.baseline:
            return reward
        total_v, cnt = self.baseline[tm_idx]
        # print(reward, (total_v/cnt))
        return reward - (total_v / cnt)
    def update_baseline(self, tm_idx, reward):
        """Accumulate (sum, count) of observed rewards for this traffic matrix."""
        if tm_idx in self.baseline:
            total_v, cnt = self.baseline[tm_idx]
            total_v += reward
            cnt += 1
            self.baseline[tm_idx] = (total_v, cnt)
        else:
            self.baseline[tm_idx] = (reward, 1)
    # def dic2array(self, solution):
    #    n_crit_pairs = int(len(solution)/ self.num_links)
    #    asolution = np.zeros(shape=(n_crit_pairs, self.num_links, self.num_links), dtype=np.float_)
    #    for k, e1, e2, v in solution:
    #        asolution[k, e1, e2] = v
    #    return asolution
    def evaluate(self, tm_idx, actions=None, ecmp=True, eval_delay=False):
        """Compare MLU (and optionally delay) of several routing strategies.

        Evaluates: the agent's ``actions``, the critical top-K heuristic, the
        plain top-K heuristic, the LP-optimal routing, and (optionally) ECMP.
        Prints a comma-separated summary line and returns the raw metrics,
        the corresponding routing solutions, and the pair selections.
        """
        ecmp_mlu, ecmp_delay = self.eval_ecmp_traffic_distribution(tm_idx, eval_delay=eval_delay)
        _, solution = self.optimal_routing_mlu_critical_pairs(tm_idx, actions)
        mlu, delay = self.eval_critical_flow_and_ecmp(tm_idx, actions, solution, eval_delay=eval_delay)
        crit_topk = self.get_critical_topK_flows(tm_idx)
        _, crit_topk_solution = self.optimal_routing_mlu_critical_pairs(tm_idx, crit_topk)
        crit_mlu, crit_delay = self.eval_critical_flow_and_ecmp(tm_idx, crit_topk, crit_topk_solution,
                                                               eval_delay=eval_delay)
        topk = self.get_topK_flows(tm_idx, self.lp_pairs)
        _, topk_solution = self.optimal_routing_mlu_critical_pairs(tm_idx, topk)
        topk_mlu, topk_delay = self.eval_critical_flow_and_ecmp(tm_idx, topk, topk_solution, eval_delay=eval_delay)
        _, optimal_solution = self.optimal_routing_mlu(tm_idx)
        optimal_mlu, optimal_mlu_delay = self.eval_optimal_routing_mlu(tm_idx, optimal_solution, eval_delay=eval_delay)
        # Ratios are optimal/actual, so 1.0 means "matches the LP optimum".
        norm_mlu = optimal_mlu / mlu
        line = str(tm_idx) + ', ' + str(optimal_mlu) + ', ' + str(norm_mlu) + ', ' + str(mlu) + ', '
        norm_crit_mlu = optimal_mlu / crit_mlu
        line += str(norm_crit_mlu) + ', ' + str(crit_mlu) + ', '
        norm_topk_mlu = optimal_mlu / topk_mlu
        line += str(norm_topk_mlu) + ', ' + str(topk_mlu) + ', '
        # norm_ecmp_mlu = optimal_mlu / ecmp_mlu
        # line += str(norm_ecmp_mlu) + ', ' + str(ecmp_mlu) + ', '
        if eval_delay:
            solution = self.optimal_routing_delay(tm_idx)
            optimal_delay = self.eval_optimal_routing_delay(tm_idx, solution)
            line += str(optimal_delay / delay) + ', '
            line += str(optimal_delay / crit_delay) + ', '
            line += str(optimal_delay / topk_delay) + ', '
            line += str(optimal_delay / optimal_mlu_delay) + ', '
            if ecmp:
                line += str(optimal_delay / ecmp_delay) + ', '
            assert tm_idx in self.load_multiplier, (tm_idx)
            line += str(self.load_multiplier[tm_idx]) + ', '
        # Drop the trailing ", " before printing.
        print(line[:-2])
        return [mlu, crit_mlu, topk_mlu, optimal_mlu, ecmp], \
            [solution, crit_topk_solution, topk_solution, optimal_solution], [actions, crit_topk, topk]
|
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Concatenate, Input
class EncoderNetwork:
    """Builds the encoder graph of a steganography-style network.

    A 7-conv "payload" branch extracts features from the payload image, and a
    "carrier" branch repeatedly concatenates its own features with payload
    features (dense skip connections) before projecting down to a 3-channel
    encoded output the same spatial size as the carrier.
    """
    def __init__(self, carrier_shape=(32, 32, 3), payload_shape=(32, 32, 1)):
        # super(EncoderModel, self).__init__()
        # Shapes are only stored; the graph is built lazily in get_network().
        self.carrier_shape = carrier_shape
        self.payload_shape = payload_shape
    def _init_branch_payload(self, payload):
        # Seven stacked 3x3 conv layers; every intermediate activation is kept
        # so the carrier branch can tap into different depths.
        self.branch__payload_conv_1 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal', input_shape=self.payload_shape)(payload)
        self.branch__payload_conv_2 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch__payload_conv_1)
        self.branch__payload_conv_3 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch__payload_conv_2)
        self.branch__payload_conv_4 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch__payload_conv_3)
        self.branch__payload_conv_5 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch__payload_conv_4)
        self.branch__payload_conv_6 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch__payload_conv_5)
        self.branch__payload_conv_7 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch__payload_conv_6)
        self.payload_tensors = [self.branch__payload_conv_1, self.branch__payload_conv_2,\
            self.branch__payload_conv_3, self.branch__payload_conv_4, self.branch__payload_conv_5,\
            self.branch__payload_conv_6, self.branch__payload_conv_7]
    def _init_branch_carrier(self, carrier):
        # Carrier branch: alternate conv pairs with concatenations that mix in
        # payload features (odd-numbered payload convs) and the previous
        # concat output, densely growing the feature stack.
        self.branch_carrier_conv_1 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(carrier)
        self.branch_carrier_concat_1 = Concatenate()([self.branch_carrier_conv_1, self.branch__payload_conv_1])
        self.branch_carrier_conv_2 = Conv2D(32, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch_carrier_concat_1)
        self.branch_carrier_conv_3 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch_carrier_conv_2)
        self.branch_carrier_concat_2 = Concatenate()([self.branch_carrier_conv_3, self.branch__payload_conv_3, self.branch_carrier_concat_1])
        self.branch_carrier_conv_4 = Conv2D(32, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch_carrier_concat_2)
        self.branch_carrier_conv_5 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch_carrier_conv_4)
        self.branch_carrier_concat_3 = Concatenate()([self.branch_carrier_conv_5, self.branch__payload_conv_5, self.branch_carrier_concat_2])
        self.branch_carrier_conv_6 = Conv2D(32, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch_carrier_concat_3)
        self.branch_carrier_conv_7 = Conv2D(16, 3, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch_carrier_conv_6)
        self.branch_carrier_concat_4 = Concatenate()([self.branch_carrier_conv_7, self.branch__payload_conv_7, self.branch_carrier_concat_3])
        # Final 1x1 convs compress the stack down to a 3-channel image.
        self.branch_carrier_conv_8 = Conv2D(32, 1, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch_carrier_concat_4)
        self.branch_carrier_conv_9 = Conv2D(8, 1, padding='same', activation='relu', kernel_initializer='he_normal')(self.branch_carrier_conv_8)
        self.encoded_output = Conv2D(3, 1, padding='same', kernel_initializer='he_normal', name='encoded_output')(self.branch_carrier_conv_9)
    def get_network(self, carrier, payload):
        """Wire both branches onto the given input tensors and return the output tensor.

        The payload branch must be built first: the carrier branch reads its
        intermediate activations.
        """
        self._init_branch_payload(payload)
        self._init_branch_carrier(carrier)
        return self.encoded_output
"""
Created on Mar 30, 2018
@author: lubo
"""
import pytest
from dae.utils.regions import Region
@pytest.mark.parametrize(
    "region,count,ref_freq,alt_freq",
    [
        (Region("1", 11501, 11501), 1, 75.0, 25.0),
        (Region("1", 11503, 11503), 1, 75.0, 25.0),
        (Region("1", 11511, 11511), 1, 50.0, 50.0),
        (Region("1", 11515, 11515), 1, 75.0, 25.0),
    ],
)
def test_variant_attributes(variants_vcf, region, count, ref_freq, alt_freq):
    """Allele-frequency attributes of variants in a region match expectations."""
    variant_set = variants_vcf("backends/inheritance_trio")
    found = list(variant_set.query_variants(regions=[region]))
    assert len(found) == count
    for variant in found:
        assert len(variant.get_attribute("af_allele_count")) == 1
        assert len(variant.get_attribute("af_allele_freq")) == 1
        observed_ref = variant["af_ref_allele_freq"]
        observed_alt = variant["af_allele_freq"]
        assert ref_freq == pytest.approx(observed_ref[0], 1e-2)
        assert alt_freq == pytest.approx(observed_alt[0], 1e-2)
        # Unknown attribute names resolve to [None].
        assert [None] == variant.get_attribute("ala bala")
|
class Icecream:
    """Minimal placeholder class."""

    def eat(self):
        """Eat the icecream.

        Bug fix: the original ``def eat(self)`` was missing the trailing
        colon and any body, which is a SyntaxError. Kept as a no-op
        placeholder returning None.
        """
        pass
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-16 03:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Help.help`` to ``Help.feedback`` in the doctorwho app."""

    dependencies = [
        ('doctorwho', '0002_auto_20190815_2148'),
    ]

    operations = [
        # Pure column rename; no data transformation is performed.
        migrations.RenameField(
            model_name='help',
            old_name='help',
            new_name='feedback',
        ),
    ]
|
from cryptofield.fieldpolynom import *
from cryptofield.fieldmatrix import *
def PolynomEquilid(F, x, y):
    """Return the GCD of polynomials *x* and *y* over field *F*.

    Uses Euclid's algorithm; the result is normalized unless one operand is
    the zero polynomial, in which case the other operand is returned as-is
    (matching the historical behavior of this function).
    """
    zero = FPolynom(F, [0])
    # Start with the higher-degree polynomial as the dividend.
    if x.deg() < y.deg():
        dividend, divisor = y.copy(), x.copy()
    else:
        dividend, divisor = x.copy(), y.copy()
    if divisor == zero:
        return dividend
    remainder = dividend % divisor
    while not (remainder == zero):
        dividend, divisor = divisor, remainder
        remainder = dividend % divisor
    divisor.normalize()
    return divisor
|
"""
Wiki templates based intents
"""
from .football import FootballPlayerFactIntent
from .qa_wiki import AnswersWikiIntent
|
import logging
import os
import re
from urllib.parse import urlsplit, urlunsplit
import requests
IAP_CLIENT_ID = "IAP_CLIENT_ID"
DEX_USERNAME = "DEX_USERNAME"
DEX_PASSWORD = "DEX_PASSWORD"
class AuthHandler(object):
    """Obtains credentials for reaching a protected KFP endpoint: either a
    Google IAP JWT or a DEX ``authservice_session`` cookie."""

    log = logging.getLogger(__name__)

    def obtain_id_token(self):
        """Return an IAP JWT, or None when IAP is not configured or the token
        cannot be fetched.

        Reads the target OAuth client id from the IAP_CLIENT_ID environment
        variable; failures are logged and swallowed (best-effort).
        """
        # Imported lazily so google-auth is only required when IAP is used.
        from google.auth.exceptions import DefaultCredentialsError
        from google.auth.transport.requests import Request
        from google.oauth2 import id_token
        client_id = os.environ.get(IAP_CLIENT_ID, None)
        jwt_token = None
        if not client_id:
            self.log.debug(
                "No IAP_CLIENT_ID provided, skipping custom IAP authentication"
            )
            return jwt_token
        try:
            # Bug fix: the message was string-concatenated onto a "%s" literal
            # ("... for %s." + client_id); pass client_id as a lazy logging arg.
            self.log.debug("Attempt to get IAP token for %s.", client_id)
            jwt_token = id_token.fetch_id_token(Request(), client_id)
            self.log.info("Obtained JWT token for IAP proxy authentication.")
        except DefaultCredentialsError as ex:
            self.log.warning(
                str(ex)
                + (
                    " Note that this authentication method does not work with default"
                    " credentials obtained via 'gcloud auth application-default login'"
                    " command. Refer to documentation on how to configure service account"
                    " locally"
                    " (https://cloud.google.com/docs/authentication/production#manually)"
                )
            )
        except Exception as e:
            self.log.error("Failed to obtain IAP access token. " + str(e))
        # Bug fix: this return previously lived in a ``finally:`` block, which
        # would also swallow BaseExceptions such as KeyboardInterrupt. For the
        # handled-exception paths the behavior is unchanged.
        return jwt_token

    def obtain_dex_authservice_session(self, kfp_api):
        """Log in to DEX with DEX_USERNAME/DEX_PASSWORD and return the
        ``authservice_session`` cookie value, or None if the env vars are unset.
        """
        if DEX_USERNAME not in os.environ or DEX_PASSWORD not in os.environ:
            self.log.debug(
                "Skipping DEX authentication due to missing env variables"
            )
            return None
        s = requests.Session()
        # GET the API endpoint; DEX redirects to its local-login form whose
        # relative URL (including the req token) is scraped from the HTML.
        r = s.get(kfp_api)
        # NOTE(review): re.search may return None if the login form is absent,
        # which raises AttributeError here — confirm whether that is desired.
        form_relative_url = re.search(
            '/dex/auth/local\\?req=([^"]*)', r.text
        ).group(0)
        kfp_url_parts = urlsplit(kfp_api)
        form_absolute_url = urlunsplit(
            [
                kfp_url_parts.scheme,
                kfp_url_parts.netloc,
                form_relative_url,
                None,
                None,
            ]
        )
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
        }
        data = {
            "login": os.environ[DEX_USERNAME],
            "password": os.environ[DEX_PASSWORD],
        }
        # The session follows the post-login redirects and collects cookies.
        s.post(form_absolute_url, headers=headers, data=data)
        return s.cookies.get_dict()["authservice_session"]
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine.resources.aws.ec2 import instance as instances
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template as tmpl
from heat.tests import common
from heat.tests.openstack.nova import fakes as fakes_nova
from heat.tests import utils
instance_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "CentOS 5.2",
"InstanceType" : "256 MB Server",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
class ServerTagsTest(common.HeatTestCase):
    """Verify AWS::EC2::Instance Tags are forwarded to nova as server metadata."""

    def setUp(self):
        super(ServerTagsTest, self).setUp()
        # Fake nova client; its servers manager is stubbed per-test.
        self.fc = fakes_nova.FakeClient()

    def _mock_get_image_id_success(self, imageId_input, imageId):
        # Stub the glance lookup so imageId_input resolves to imageId.
        self.m.StubOutWithMock(glance.GlanceClientPlugin,
                               'find_image_by_name_or_id')
        glance.GlanceClientPlugin.find_image_by_name_or_id(
            imageId_input).MultipleTimes().AndReturn(imageId)

    def _setup_test_instance(self, intags=None, nova_tags=None):
        """Build an Instance resource carrying *intags* and record a mox
        expectation that nova's server create receives *nova_tags* as ``meta``.
        """
        stack_name = 'tag_test'
        t = template_format.parse(instance_template)
        template = tmpl.Template(t,
                                 env=environment.Environment(
                                     {'KeyName': 'test'}))
        self.stack = parser.Stack(utils.dummy_context(), stack_name, template,
                                  stack_id=str(uuid.uuid4()))
        # Inject the Tags property into the parsed template dict.
        t['Resources']['WebServer']['Properties']['Tags'] = intags
        resource_defns = template.resource_definitions(self.stack)
        instance = instances.Instance(stack_name,
                                      resource_defns['WebServer'], self.stack)
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().AndReturn(self.fc)
        self._mock_get_image_id_success('CentOS 5.2', 1)
        # need to resolve the template functions
        metadata = instance.metadata_get()
        server_userdata = instance.client_plugin().build_userdata(
            metadata,
            instance.t['Properties']['UserData'],
            'ec2-user')
        self.m.StubOutWithMock(nova.NovaClientPlugin, 'build_userdata')
        nova.NovaClientPlugin.build_userdata(
            metadata,
            instance.t['Properties']['UserData'],
            'ec2-user').AndReturn(server_userdata)
        # The key expectation: meta=nova_tags on the create call.
        self.m.StubOutWithMock(self.fc.servers, 'create')
        self.fc.servers.create(
            image=1, flavor=1, key_name='test',
            name=utils.PhysName(stack_name, instance.name),
            security_groups=None,
            userdata=server_userdata, scheduler_hints=None,
            meta=nova_tags, nics=None, availability_zone=None,
            block_device_mapping=None).AndReturn(
                self.fc.servers.list()[1])
        return instance

    def test_instance_tags(self):
        """Tags on create are translated to nova metadata key/value pairs."""
        tags = [{'Key': 'Food', 'Value': 'yum'}]
        metadata = dict((tm['Key'], tm['Value']) for tm in tags)
        instance = self._setup_test_instance(intags=tags, nova_tags=metadata)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        # we are just using mock to verify that the tags get through to the
        # nova call.
        self.m.VerifyAll()

    def test_instance_tags_updated(self):
        """Updated Tags are pushed to nova via servers.set_meta."""
        tags = [{'Key': 'Food', 'Value': 'yum'}]
        metadata = dict((tm['Key'], tm['Value']) for tm in tags)
        instance = self._setup_test_instance(intags=tags, nova_tags=metadata)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
        # we are just using mock to verify that the tags get through to the
        # nova call.
        self.m.VerifyAll()
        # Re-stub for the update phase: expect set_meta with the new tags.
        self.m.UnsetStubs()
        new_tags = [{'Key': 'Food', 'Value': 'yuk'}]
        new_metadata = dict((tm['Key'], tm['Value']) for tm in new_tags)
        self.m.StubOutWithMock(self.fc.servers, 'set_meta')
        self.fc.servers.set_meta(self.fc.servers.list()[1],
                                 new_metadata).AndReturn(None)
        self.stub_ImageConstraint_validate()
        self.stub_KeypairConstraint_validate()
        self.stub_FlavorConstraint_validate()
        self.m.ReplayAll()
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['Tags'] = new_tags
        scheduler.TaskRunner(instance.update, update_template)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
|
import base64
# Optional dependency guard: the cryptography package may be absent; callers
# can check HAS_CRYPTO before using the helpers below.
try:
    import cryptography
    from cryptography.fernet import Fernet, InvalidToken
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
    HAS_CRYPTO = True
except ImportError:
    # Bug fix: was a bare ``except:``, which also hid unrelated errors
    # (including KeyboardInterrupt) as "cryptography missing".
    HAS_CRYPTO = False
class criptografia:
    """Password-based symmetric encryption helpers built on Fernet.

    encrypt/decrypt return the string "1" on any failure (historical API).
    """

    def getEncryptor(self, password):
        """Derive a urlsafe-base64 Fernet key from *password* using
        PBKDF2-HMAC-SHA256 with a fixed salt and 100000 iterations."""
        if not isinstance(password, bytes):
            password = bytes(password.encode())
        derivation = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=b'\x15%q\xe6\xbb\x02\xa6\xf8\x13q\x90\xcf6+\x1e\xeb',
            iterations=100000,
            backend=default_backend()
        )
        return base64.urlsafe_b64encode(derivation.derive(password))

    def encrypt(self, backup: str, password: str):
        """Encrypt *backup* with *password*; return ciphertext text or "1" on error."""
        try:
            fernet = Fernet(self.getEncryptor(password))
            plaintext = backup.encode()
        except Exception:
            return "1"
        try:
            ciphertext = fernet.encrypt(plaintext)
            return str(ciphertext.decode())
        except InvalidToken as e:
            return "1"

    def decrypt(self, cipherBackup: str, password: str):
        """Decrypt *cipherBackup* with *password*; return plaintext or "1" on error."""
        try:
            token = cipherBackup.encode()
            fernet = Fernet(self.getEncryptor(password))
            plaintext = fernet.decrypt(token)
            return str(plaintext.decode())
        except Exception:
            return "1"
# Module-level singleton used by importers of this module.
Crip = criptografia()
# Example usage kept below as an inert string literal (no-op at import time).
"""val = Crip.encrypt("hola", "DATO1345")
print("Encrip: ", val)
print("valordec: ", Crip.decrypt( val , "DATO1345"))"""
|
"""Tests for random geometric graphs"""
import Box2D.b2 as b2
import shapely.geometry
import metis
from metis.factored_random_geometric_graphs import (
FactoredRandomGeometricGraph, NoObjectContactBlacklist)
from metis.debug import graphical_debug, draw_polygon, draw_polygons
def example_world():
    """Create an example Box2D world for testing

    Returns:
        tuple(world, bodies, configuration):
            - world is a Box2D world with multiple dynamic bodies
            - bodies is a dictionary mapping object names to their Box2D
              body
            - configuration is an example collision-free configuration
    """
    get_triangles = metis.geometry.box2d_triangles_from_shapely
    # Static geometry: a 10x10 room outline (box minus its inward buffer)
    # plus a dividing wall with two circular doorways cut out.
    obstacle_geometry = shapely.geometry.box(0, 0, 10, 10)
    obstacle_geometry = obstacle_geometry.difference(
        obstacle_geometry.buffer(-.2))
    obstacle_geometry = obstacle_geometry.union(
        shapely.geometry.LineString([(5, 0), (5, 10)]).buffer(.1, cap_style=2))
    obstacle_geometry = obstacle_geometry.difference(
        shapely.geometry.Point(5, 2.5).buffer(1, cap_style=1))
    obstacle_geometry = obstacle_geometry.difference(
        shapely.geometry.Point(5, 7.5).buffer(1, cap_style=1))
    world = b2.world()
    obstacles = world.CreateStaticBody()
    for triangle in get_triangles(obstacle_geometry):
        _ = obstacles.CreateFixture(shape=triangle)
    # Triangular agent body pointing along +x.
    agent = world.CreateDynamicBody()
    agent_geometry = shapely.geometry.Polygon([
        (2./3., 0.), (-1./3., .4), (-1./3., -.4)])
    for triangle in get_triangles(agent_geometry):
        _ = agent.CreateFixture(shape=triangle)
    # Fix: use range() instead of the Python-2-only xrange() so this helper
    # also runs on Python 3 (identical behavior on Python 2).
    boxes = [world.CreateDynamicBody() for _ in range(2)]
    for box in boxes:
        box.CreateFixture(shape=b2.polygonShape(box=(.8, .8)))
    bodies = {'robot': agent, 'box1': boxes[0], 'box2': boxes[1]}
    sample_configuration = {
        'robot': (1, 2, 0), 'box1': (3, 2, -.2), 'box2': (5, 2.5, 0.1)}
    return world, bodies, sample_configuration
def test_acyclic_chains():
    """Check that the correct number of acyclic chains are generated"""
    names = ['robot', 'box1', 'box2']

    def chains():
        # Fresh generator on every call; the first pass below consumes it.
        return FactoredRandomGeometricGraph.acyclic_chains(names)

    expected_number = 16
    actual_number = sum(1 for _ in chains())
    assert actual_number == expected_number, \
        "Expected {} chains; actual value was {}".format(
            expected_number, actual_number)
    assert all(
        FactoredRandomGeometricGraph.is_acyclic(chain)
        for chain in chains())
def test_contains():
    """Check that vertex membership (__contains__) is computed correctly"""
    world, bodies, _ = example_world()
    geometry = metis.geometry.ManyShapeGeometry(world, bodies)
    dynamics = metis.dynamics.MagneticDynamics(bodies)
    factored_graph = FactoredRandomGeometricGraph(
        geometry, dynamics, default_count=5, blacklist=NoObjectContactBlacklist())
    # A vertex maps each object name to a (support, sample-index) pair.
    assert {'robot': (None, 0), 'box1': (None, 0), 'box2': (None, 0)} in factored_graph
    assert {'robot': ('box1', 0), 'box1': (None, 0), 'box2': (None, 0)} in factored_graph
    # Sample index 5 is out of range for default_count=5.
    assert {'robot': ('box1', 5), 'box1': (None, 0), 'box2': (None, 0)} not in factored_graph
    # An object cannot support itself.
    assert {'robot': ('robot', 0), 'box1': (None, 0), 'box2': (None, 0)} not in factored_graph
    # Mutual support forms a cycle, which is rejected.
    assert {'robot': ('box1', 0), 'box1': ('robot', 0), 'box2': (None, 0)} not in factored_graph
def test_iter():
    """Check if we can iterate over all vertices"""
    world, bodies, _ = example_world()
    geometry = metis.geometry.ManyShapeGeometry(world, bodies)
    dynamics = metis.dynamics.MagneticDynamics(bodies)
    graph = FactoredRandomGeometricGraph(
        geometry, dynamics, default_count=5, blacklist=NoObjectContactBlacklist())
    vertex_count = sum(1 for _ in graph)
    assert vertex_count == len(graph), \
        "__iter__ should be consistent with __len__"
    for vertex in graph:
        assert vertex in graph, \
            "__iter__ should be consistent with __contains__"
def test_getitem():
    """Check if we can compute poses from vertices"""
    world, bodies, _ = example_world()
    geometry = metis.geometry.ManyShapeGeometry(world, bodies)
    dynamics = metis.dynamics.MagneticDynamics(bodies)
    graph = FactoredRandomGeometricGraph(
        geometry, dynamics, default_count=5,
        blacklist=NoObjectContactBlacklist())
    sample_vertices = [
        {'robot': (None, 0), 'box1': (None, 0), 'box2': (None, 0)},
        {'robot': ('box1', 0), 'box1': (None, 0), 'box2': (None, 0)},]
    for sample_vertex in sample_vertices:
        configuration = graph[sample_vertex]
        assert all(name in configuration for name in graph.names)
        # Each pose is an (x, y, theta) triple (Python 2 itervalues).
        assert all(len(pose) == 3 for pose in configuration.itervalues())
def test_nearest():
    """Check that we can look up the nearest vertex"""
    world, bodies, sample_configuration = example_world()
    geometry = metis.geometry.ManyShapeGeometry(world, bodies)
    dynamics = metis.dynamics.MagneticDynamics(bodies)
    # Seed the graph with the sample configuration so nearest() can
    # reproduce it exactly (Python 2 iteritems).
    configurations = {(name, None): [value,]
                      for name, value in sample_configuration.iteritems()}
    factored_graph = FactoredRandomGeometricGraph(
        geometry, dynamics, default_count=100,
        blacklist=NoObjectContactBlacklist(), configurations=configurations)
    vertex = factored_graph.nearest(sample_configuration)
    assert vertex in factored_graph
    assert geometry.configuration_is_free(factored_graph[vertex])
    nearest_configuration = factored_graph[vertex]
    assert nearest_configuration.keys() == sample_configuration.keys()
    # Seeded case: the nearest vertex must match the sample poses exactly.
    for name in sample_configuration:
        assert all(nearest_configuration[name] == sample_configuration[name])
    # Unseeded case: nearest() must still yield a valid, collision-free
    # vertex covering the same objects (poses need not match exactly).
    factored_graph = FactoredRandomGeometricGraph(
        geometry, dynamics, default_count=100, blacklist=NoObjectContactBlacklist())
    vertex = factored_graph.nearest(sample_configuration)
    assert vertex in factored_graph
    assert geometry.configuration_is_free(factored_graph[vertex])
    nearest_configuration = factored_graph[vertex]
    assert nearest_configuration.keys() == sample_configuration.keys()
def test_neighbors():
    """Check that neighbors are computed correctly"""
    world, bodies, sample_configuration = example_world()
    geometry = metis.geometry.ManyShapeGeometry(world, bodies)
    dynamics = metis.dynamics.MagneticDynamics(bodies)
    configurations = {(name, None): [value,]
                      for name, value in sample_configuration.iteritems()}
    factored_graph = FactoredRandomGeometricGraph(
        geometry, dynamics, default_count=100,
        blacklist=NoObjectContactBlacklist(), configurations=configurations)
    vertex = factored_graph.nearest(sample_configuration)
    neighbors = list(factored_graph.neighbors(vertex))
    # Collect any vertices that appear more than once.
    duplicates = set()
    seen = set()
    for neighbor in neighbors:
        if neighbor in seen:
            duplicates.add(neighbor)
        else:
            seen.add(neighbor)
    # Bug fix: the original adjacent string literals concatenated so that
    # .join() was called on the whole message ("...elements\n\t".join(...)),
    # dropping the prefix; join the duplicates and prepend it explicitly.
    assert len(duplicates) == 0, (
        "Neighbors should be unique: had duplicate elements\n\t"
        + "\n\t".join(str(d) for d in duplicates)
    )
    # From a free configuration there must be a grasping neighbor...
    assert any(v['robot'][0] is not None for v in neighbors), graphical_debug(
        "There should be at least one neighbor with an object in its grasp",
        lambda ax: draw_polygons(ax, {
            name: metis.geometry.shapely_from_box2d_body(bodies[name], pose)
            for name, pose in factored_graph[vertex].iteritems()}))
    # ...and from a grasping vertex there must be a release neighbor.
    grasping = next(v for v in neighbors if v['robot'][0] is not None)
    neighbors = list(factored_graph.neighbors(grasping))
    assert any(v['robot'][0] is None for v in neighbors), graphical_debug(
        "There should be at least one neighbor without an object in its grasp",
        lambda ax: draw_polygons(ax, {
            name: metis.geometry.shapely_from_box2d_body(bodies[name], pose)
            for name, pose in factored_graph[grasping].iteritems()}))
|
from __future__ import division, print_function
import unittest
import mock
from smqtk.algorithms.nn_index.lsh.functors import \
LshFunctor, get_lsh_functor_impls
class TestLshFunctorImplGetter (unittest.TestCase):
    """Tests for the get_lsh_functor_impls plugin-discovery helper."""

    def _check_common_plugin_args(self, m_get_plugins):
        """Shared assertions on the positional args passed to get_plugins."""
        m_get_plugins.assert_called_once()
        call_args = m_get_plugins.call_args
        self.assertEqual(call_args[0][0],
                         'smqtk.algorithms.nn_index.lsh.functors')
        # call_args[0][1] is a filesystem path that depends on where the
        # python code lives, so it is not asserted.
        self.assertEqual(call_args[0][2], 'LSH_FUNCTOR_PATH')
        self.assertEqual(call_args[0][3], 'LSH_FUNCTOR_CLASS')
        self.assertEqual(call_args[0][4], LshFunctor)

    @mock.patch('smqtk.algorithms.nn_index.lsh.functors.plugin.get_plugins')
    def test_get_lsh_functor_impls_no_reload(self, m_get_plugins):
        get_lsh_functor_impls()
        self._check_common_plugin_args(m_get_plugins)
        self.assertFalse(m_get_plugins.call_args[1]['reload_modules'])

    @mock.patch('smqtk.algorithms.nn_index.lsh.functors.plugin.get_plugins')
    def test_get_lsh_functor_impls_with_reload(self, m_get_plugins):
        get_lsh_functor_impls(True)
        self._check_common_plugin_args(m_get_plugins)
        self.assertTrue(m_get_plugins.call_args[1]['reload_modules'])
|
# -*- coding: utf-8 -*-
import sys
import numpy
from utils import *
class LogisticRegression(object):
    """Multi-class logistic regression trained by full-batch gradient steps.

    Stores the training data on the instance; `train` performs one update of
    the weights W and bias b from the current (or freshly supplied) input.
    """

    def __init__(self, input, label, n_in, n_out):
        self.x = input
        self.y = label
        self.W = numpy.zeros((n_in, n_out))  # weights, initialized to zero
        self.b = numpy.zeros(n_out)          # bias, initialized to zero

    def train(self, lr=0.1, input=None, L2_reg=0.00):
        """One gradient step with learning rate *lr* and L2 penalty on W."""
        if input is not None:
            self.x = input
        predictions = self.output(self.x)
        d_y = self.y - predictions
        # Weight update uses the summed gradient; bias uses the mean error.
        self.W += lr * numpy.dot(self.x.T, d_y) - lr * L2_reg * self.W
        self.b += lr * numpy.mean(d_y, axis=0)
        self.d_y = d_y

    def output(self, x):
        """Class probabilities: softmax over the affine transform."""
        return softmax(numpy.dot(x, self.W) + self.b)

    def predict(self, x):
        """Alias of output(): probabilities for each class."""
        return self.output(x)

    def negative_log_likelihood(self):
        """Mean cross-entropy of the stored training set under the model."""
        activation = softmax(numpy.dot(self.x, self.W) + self.b)
        per_sample = numpy.sum(
            self.y * numpy.log(activation)
            + (1 - self.y) * numpy.log(1 - activation),
            axis=1)
        return -numpy.mean(per_sample)
def test_lr(learning_rate=0.1, n_epochs=500):
    """Train LogisticRegression on two Gaussian blobs and print predictions."""
    rng = numpy.random.RandomState(123)
    # training data: two clusters of N points in d dimensions
    d = 2
    N = 10
    x1 = rng.randn(N, d) + numpy.array([0, 0])
    x2 = rng.randn(N, d) + numpy.array([20, 10])
    y1 = [[1, 0] for i in range(N)]  # one-hot labels, class 0
    y2 = [[0, 1] for i in range(N)]  # one-hot labels, class 1
    x = numpy.r_[x1.astype(int), x2.astype(int)]
    y = numpy.r_[y1, y2]
    # construct LogisticRegression
    classifier = LogisticRegression(input=x, label=y, n_in=d, n_out=2)
    # train with a geometrically decaying learning rate
    for epoch in range(n_epochs):
        classifier.train(lr=learning_rate)
        # cost = classifier.negative_log_likelihood()
        # print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, cost
        learning_rate *= 0.995
    # test: print class probabilities for each cluster
    result = classifier.predict(x)
    for i in range(N):
        print (result[i])
    print()
    for i in range(N):
        print (result[N+i])
if __name__ == "__main__":
    test_lr()
|
from typing import List
import stweet as st
def _scrap_tweets_with_count_assert(count: int):
    """Search '#koronawirus' limited to *count* tweets and assert exactly
    *count* were collected.

    Performs live network requests to Twitter via stweet.
    """
    phrase = '#koronawirus'
    search_tweets_task = st.SearchTweetsTask(
        all_words=phrase,
        tweets_limit=count
    )
    tweets_collector = st.CollectorTweetOutput()
    st.TweetSearchRunner(
        search_tweets_task=search_tweets_task,
        tweet_outputs=[tweets_collector]
    ).run()
    assert len(tweets_collector.get_scrapped_tweets()) == count
# Live-network smoke tests at increasing tweet-count limits.
def test_scrap_small_count_of_tweets():
    _scrap_tweets_with_count_assert(10)
def test_scrap_medium_count_of_tweets():
    _scrap_tweets_with_count_assert(100)
def test_scrap_big_count_of_tweets():
    _scrap_tweets_with_count_assert(299)
|
import tensorflow_datasets as tfds
import tensorflow as tf
import time
import numpy as np
import matplotlib.pyplot as plt
def create_padding_mask(seq):
    """Mark zero-valued (padding) positions of *seq* with 1.0.

    Returns a float32 tensor shaped (batch, 1, 1, seq_len) so it broadcasts
    over the head and query-length axes of attention logits.
    """
    mask = tf.cast(tf.math.equal(seq, 0), tf.float32)
    return mask[:, tf.newaxis, tf.newaxis, :]
def create_look_ahead_mask(size):
    """Return a (size, size) mask with 1.0 strictly above the diagonal.

    Masked (1.0) entries prevent attention to future positions.
    """
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return 1 - lower_triangle
def get_angles(pos, i, d_model):
    """Return positional-encoding angle arguments pos / 10000^(2*(i//2)/d_model).

    *pos* and *i* broadcast against each other (typically column/row vectors).
    """
    inverse_freq = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
    return pos * inverse_freq
def positional_encoding(position, d_model):
    """Return the sinusoidal positional encoding.

    Shape (1, position, d_model), dtype float32; even feature indices carry
    sin, odd indices carry cos of the angle arguments.
    """
    angle_rads = get_angles(np.arange(position)[:, np.newaxis],
                            np.arange(d_model)[np.newaxis, :], d_model)
    # Fix: removed a leftover debug print of angle_rads.shape that fired on
    # every call (Encoder and Decoder both build encodings).
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    pos_encoding = angle_rads[np.newaxis, ...]
    return tf.cast(pos_encoding, dtype=tf.float32)
#positional_encoding(10,10)
def scaled_dot_product_attention(q,k,v,mask):
    """
    Compute the attention weights.

    q, k, v must have matching leading dimensions.
    k, v must have a matching penultimate dimension, i.e. seq_len_k == seq_len_v.
    The mask has different shapes depending on its type (padding or
    look-ahead), but it must be broadcastable for the addition below.

    Args:
        q: query, shape == (..., seq_len_q, depth)
        k: key, shape == (..., seq_len_k, depth)
        v: value, shape == (..., seq_len_v, depth_v)
        mask: float tensor broadcastable to (..., seq_len_q, seq_len_k),
            or None.

    Returns:
        output, attention weights
    """
    matmul_qk = tf.matmul(q,k,transpose_b = True)#(...,seq_len_q,seq_len_k)
    #https://blog.csdn.net/qq_37430422/article/details/105042303 explains why scaling is needed
    # Scale matmul_qk by sqrt(dk) to keep the softmax logits well-conditioned.
    dk = tf.cast(tf.shape(k)[-1],tf.float32)
    scaled_dot_product_logits = matmul_qk/tf.math.sqrt(dk)
    # Add the mask to the scaled logits: masked positions receive a large
    # negative value and vanish after the softmax.
    if mask is not None:
        scaled_dot_product_logits+=(mask*-1e9)
    attention_weights = tf.nn.softmax(scaled_dot_product_logits,axis = -1)
    output = tf.matmul(attention_weights,v)
    return output, attention_weights
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: project q/k/v to d_model, split into num_heads
    heads of size depth = d_model // num_heads, attend per head, recombine."""
    def __init__(self,d_model,num_heads):
        super(MultiHeadAttention,self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        # d_model must divide evenly across the heads.
        assert d_model%self.num_heads==0
        self.depth = d_model//self.num_heads
        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)
        self.dense = tf.keras.layers.Dense(d_model)
    def split_heads(self,x,batch_size):
        # (batch, seq, d_model) -> (batch, num_heads, seq, depth)
        x = tf.reshape(x,(batch_size,-1,self.num_heads,self.depth))
        return tf.transpose(x,perm=[0,2,1,3])
    def call(self, v,k,q,mask):
        """Return (output, attention_weights) for queries q over keys/values k, v."""
        batch_size = tf.shape(q)[0]
        q=self.wq(q)
        k=self.wk(k)
        v=self.wv(v)
        q = self.split_heads(q,batch_size)
        k = self.split_heads(k,batch_size)
        v = self.split_heads(v,batch_size)
        scaled_attention , attention_weights = scaled_dot_product_attention(q,k,v,mask)
        # (batch, num_heads, seq_q, depth) -> (batch, seq_q, num_heads, depth)
        scaled_attention = tf.transpose(scaled_attention,perm=[0,2,1,3])
        # Merge the heads back into a single d_model-wide feature axis.
        concat_attention = tf.reshape(scaled_attention,(batch_size,-1,self.d_model))
        output = self.dense(concat_attention)
        return output, attention_weights
def point_wise_feed_forward_network(d_model, dff):
    """Position-wise feed-forward net: Dense(dff, relu) -> Dense(d_model)."""
    hidden = tf.keras.layers.Dense(dff, activation='relu')
    projection = tf.keras.layers.Dense(d_model)
    return tf.keras.Sequential([hidden, projection])
#encoder layer
class EncoderLayer(tf.keras.layers.Layer):
    """Transformer encoder layer: multi-head self-attention then FFN, each
    followed by dropout, a residual connection, and layer normalization."""

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        # Bug fix: was tf.keras.LayerNormalization, which does not exist and
        # raises AttributeError; the layer lives under tf.keras.layers.
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        # Self-attention sub-layer.
        attn_output, _ = self.mha(x, x, x, mask)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(x + attn_output)
        # Feed-forward sub-layer.
        ffn_output = self.ffn(out1)
        # Bug fix: dropout2 was constructed but never used; dropout1 was
        # applied twice (cf. DecoderLayer, which uses distinct dropouts).
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = self.layernorm2(out1 + ffn_output)
        return out2
#decoder layer
class DecoderLayer(tf.keras.layers.Layer):
    """Transformer decoder layer: masked self-attention, encoder-decoder
    attention, and FFN — each with dropout, residual, and layer norm."""

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(d_model, num_heads)
        self.mha2 = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        # Bug fix: decoder self-attention must use the look-ahead mask so a
        # position cannot attend to later positions; the previous code passed
        # padding_mask here while the correct call sat commented out.
        attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)
        attn1 = self.dropout1(attn1, training=training)
        out1 = self.layernorm1(attn1 + x)
        # Encoder-decoder attention: queries from the decoder, keys/values
        # from the encoder output; padding_mask hides encoder padding.
        attn2, attn_weights_block2 = self.mha2(enc_output, enc_output, out1, padding_mask)
        attn2 = self.dropout2(attn2, training=training)
        out2 = self.layernorm2(attn2 + out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout3(ffn_output, training=training)
        out3 = self.layernorm3(ffn_output + out2)
        return out3, attn_weights_block1, attn_weights_block2
#Encoder
class Encoder(tf.keras.layers.Layer):
    """Transformer encoder: token embedding + positional encoding followed by
    a stack of num_layers EncoderLayers."""
    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, maximum_position_encoding,rate=0.1):
        super(Encoder,self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(input_vocab_size,d_model)
        self.pos_encoding = positional_encoding(maximum_position_encoding, self.d_model)
        self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)
    def call(self,x,training,mask):
        """Embed x, add positional encoding, and run the encoder stack."""
        seq_len = tf.shape(x)[1]
        x = self.embedding(x)
        # Scale x by sqrt(d_model) so it is large relative to the positional
        # encoding, reducing the encoding's influence on the embeddings.
        x*=tf.math.sqrt(tf.cast(self.d_model,tf.float32))
        x += self.pos_encoding[:,:seq_len,:]
        x = self.dropout(x,training=training)
        for i in range(self.num_layers):
            x = self.enc_layers[i](x,training,mask)
        return x
class Decoder(tf.keras.layers.Layer):
    """Transformer decoder: Dense input projection plus positional encoding,
    followed by a stack of `num_layers` DecoderLayer blocks.

    NOTE(review): unlike Encoder, the "embedding" here is a Dense layer, so
    the target inputs are presumably continuous feature vectors rather than
    token ids -- confirm against the data pipeline.
    """

    def __init__(self, num_layers, d_model, num_heads, dff, maximum_position_encoding, rate=0.1):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        # Project each target feature vector to d_model dimensions.
        self.embedding = tf.keras.layers.Dense(d_model)
        self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        """Decode `x` against `enc_output`.

        Returns a (batch_size, target_seq_len, d_model) tensor and a dict of
        per-layer attention weights.
        (Removed leftover debug prints of seq_len / x.shape.)
        """
        seq_len = tf.shape(x)[1]
        attention_weights = {}
        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        # Scale x up so it is large relative to the positional encoding,
        # keeping the encoding's contribution comparatively small.
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        for i in range(self.num_layers):
            x, block1, block2 = self.dec_layers[i](x, enc_output, training, look_ahead_mask, padding_mask)
            attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1
            attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2
        return x, attention_weights
class Transformer(tf.keras.Model):
    """Full encoder-decoder Transformer producing per-position logits over
    the target vocabulary."""

    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, target_vocab_size, pe_input, pe_target, rate=0.1):
        super(Transformer, self).__init__()
        self.encoder = Encoder(num_layers, d_model, num_heads, dff, input_vocab_size, pe_input, rate)
        # BUG FIX: Decoder.__init__ takes (num_layers, d_model, num_heads,
        # dff, maximum_position_encoding, rate); the original call passed
        # target_vocab_size into the position-encoding slot and pe_target
        # into rate. Pass the intended values instead.
        self.decoder = Decoder(num_layers, d_model, num_heads, dff, pe_target, rate)
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inp, tar, training, enc_padding_mask, look_ahead_mask, dec_padding_mask):
        """Return (logits of shape (batch, tar_seq_len, target_vocab_size),
        decoder attention weights)."""
        enc_output = self.encoder(inp, training, enc_padding_mask)
        dec_output, attention_weights = self.decoder(tar, enc_output, training, look_ahead_mask, dec_padding_mask)
        final_output = self.final_layer(dec_output)  # (batch_size, tar_seq_len, target_vocab_size)
        return final_output, attention_weights
if __name__ == "__main__":
    def print_out(q, k, v):
        """Run unmasked scaled dot-product attention and print the result."""
        temp_out, temp_attn = scaled_dot_product_attention(q, k, v, None)
        print('Attention weights are:')
        print(temp_attn)
        print('Output is:')
        print(temp_out)

    np.set_printoptions(suppress=True)

    # Four keys in 3-d space; the last two coincide.  (4, 3)
    temp_k = tf.constant(
        [[10, 0, 0],
         [0, 10, 0],
         [0, 0, 10],
         [0, 0, 10]], dtype=tf.float32)

    # One value vector per key.  (4, 2)
    temp_v = tf.constant(
        [[1, 0],
         [10, 0],
         [100, 5],
         [1000, 6]], dtype=tf.float32)

    # This query aligns with the second key, so attention should return
    # (approximately) the second value.
    temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32)  # (1, 3)
    print_out(temp_q, temp_k, temp_v)
|
import pygame
from . import ColorPicker, Transform
from UI import Button
class PropertyPanel:
    """Side panel that builds and stacks property-editor widgets for the
    currently selected object.

    Each recognised name in `properties` adds one widget below the previous
    one; the panel height grows accordingly.
    """

    def __init__(self, x, y, properties, UIManager, selected_obj):
        self.x, self.y = x, y
        self.w, self.h = 320, 10  # height grows as widgets are added below
        self.properties_obj = {}
        self.padding = 10
        self.linking = False  # toggled by the "Link" button
        for p in properties:
            if p == "Transform":
                transform = Transform(self.x, self.y + self.h, UIManager)
                self.properties_obj[p] = transform
                self.h += transform.h + self.padding
            if p == "ColorPicker":
                cp = ColorPicker(self.x, self.y + self.h, UIManager)
                self.properties_obj[p] = cp
                self.h += cp.h + self.padding
            if p == "Player_Id":
                text = f"player : {selected_obj.player_id}"
                b = Button(self.x + 10, self.y + self.h,
                           300, 70, text, (170, 170, 170), selected_obj.switch_player_id, [], center_text=True)
                UIManager.add(b)
                self.properties_obj[p] = b
                self.h += b.rect.h + self.padding
            if p == "Linker":
                b = Button(self.x + 10, self.y + self.h,
                           300, 70, "Link", (170, 170, 170), self.toggle_linker, [])
                UIManager.add(b)
                self.properties_obj[p] = b
                self.h += b.rect.h + self.padding
        self.rect = pygame.Rect(self.x, self.y, self.w, self.h)

    def draw(self, surface):
        """Draw the panel background, then every child widget."""
        pygame.draw.rect(surface, (100, 100, 100), (self.x, self.y, self.w, self.h))
        for key, obj in self.properties_obj.items():
            obj.draw(surface)

    def destroy(self, UIManager):
        """Tear down every child widget (deregistering it from UIManager)."""
        for key, obj in self.properties_obj.items():
            obj.destroy(UIManager)

    def set_color(self, color):
        if "ColorPicker" in self.properties_obj:
            self.properties_obj["ColorPicker"].set_color(color)

    def get_color(self):
        if "ColorPicker" in self.properties_obj:
            return self.properties_obj["ColorPicker"].get_color()

    def set_transform(self, rect):
        # BUG FIX: guard like the other accessors -- calling this on a panel
        # built without a Transform widget used to raise KeyError.
        if "Transform" in self.properties_obj:
            self.properties_obj["Transform"].set_transform(rect)

    def get_transform(self):
        if "Transform" in self.properties_obj:
            return self.properties_obj["Transform"].get_transform()

    def toggle_linker(self, btn, player_id):
        """Button callback: flip link mode on/off (btn/player_id unused)."""
        self.linking = not self.linking

    def is_hovered(self, mouse_position):
        return self.rect.collidepoint(mouse_position)
from django.urls import path
from naccbisapp.views import LeaderboardView, TeamOffenseView
# Route table for the NACC baseball stats API leaderboard endpoints.
urlpatterns = [
    path('leaders/batters', LeaderboardView.as_view()),
    path('leaders/team_offense', TeamOffenseView.as_view()),
]
|
# Sparse-ruler experiment: a "marking" is a sorted list of mark positions on
# a ruler; a length L is measurable if two marks are exactly L apart.
# TODO: try recursion in version 2.

# Find all possible markings
def all_possible_markings():
    """Return {size: marking} where each marking extends the previous one.

    NOTE(review): the original file was non-runnable pseudocode (with a
    '# move this bit' marker); this is the most literal working reading --
    the size-N ruler is the size N-1 ruler plus a mark at N. Confirm intent.
    """
    max_size = 25
    possible_markings = {1: [0, 1]}
    for current_size in range(2, max_size):
        possible_markings[current_size] = possible_markings[current_size - 1] + [current_size]
    return possible_markings

# Check whether a set of marks (marking) can measure every length
def can_measure_all(marking, length_of_ruler=None):
    """Return True if every length 0..length_of_ruler is the difference of
    two marks in `marking`.

    `length_of_ruler` defaults to the largest mark, which the original code
    read from an undefined free variable.
    """
    if length_of_ruler is None:
        length_of_ruler = max(marking)
    # A set makes membership tests O(1) ("convert arrays into sets").
    measurable_lengths = {end_mark - start_mark
                          for start_mark in marking
                          for end_mark in marking
                          if end_mark >= start_mark}
    # range(... + 1) includes length_of_ruler itself; 0 counts as measurable.
    return all(length in measurable_lengths for length in range(length_of_ruler + 1))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-11 14:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10): updates help text / upload settings on
    # Commit.code.
    # NOTE(review): `upload_to=b''` is a bytes literal left over from
    # Python 2-era tooling -- confirm it still behaves correctly on Python 3.
    dependencies = [
        ('repositories', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='commit',
            name='code',
            field=models.FileField(help_text='Upload a zip file with the directory that contains code that you want the system to assess', upload_to=b'', verbose_name='Code for this commit'),
        ),
    ]
|
import os
import tempfile
import shutil
import string
import random
from unittest import TestCase
from urltomd import Content, Mapper
class BaseTestCase(TestCase):
    """
    Shared fixture for the test suite: creates an empty scratch directory
    before each test and removes it afterwards, plus a few helpers used by
    several test classes.

    Not a test class itself -- it is meant to be subclassed.
    """

    def setUp(self):
        # Fresh, isolated working directory per test.
        self.path = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.path)

    def create_md_file(self):
        """
        Thin wrapper around `tempfile.mkstemp` that fills in an `.md`
        suffix and this test's scratch directory.
        """
        return tempfile.mkstemp(suffix='.md', dir=self.path)

    def gen_rand_str(self, size=10,
                     chars=string.ascii_uppercase + string.digits):
        """
        Produce a random string.

        :param size: desired length in characters (default 10).
        :param chars: alphabet to draw from (default: ASCII uppercase
            letters plus digits).

        Adapted from http://stackoverflow.com/a/2257449/1743565
        """
        return ''.join(random.choice(chars) for _ in range(size))
class MapperTestCase(BaseTestCase):
    """
    Tests the Mapper class (URL <-> file-path mapping plus CRUD helpers).

    Uses unittest's assert methods instead of bare `assert`, so failures
    report the offending values, and avoids `== None` / `== False`
    comparisons.
    """

    def setUp(self):
        super(MapperTestCase, self).setUp()
        self.mapper = Mapper(self.path)

    def test_url2path(self):
        """
        Test whether urls are correctly converted.
        """
        # Leading/trailing slashes must not affect the resulting path.
        urlpaths = {
            '/about/': 'about.md',
            'about': 'about.md',
            '/about': 'about.md',
            'about/': 'about.md',
        }
        for url, path in urlpaths.items():
            self.assertEqual(self.mapper.url2path(url),
                             os.path.join(self.path, path))
            self.assertEqual(self.mapper.url2path(url, relative=True), path)

    def test_exists(self):
        """
        Test whether file existence checks work.
        """
        url = self.gen_rand_str()
        self.assertFalse(self.mapper.exists(url))
        # `with` guarantees the handle is closed even on assertion failure.
        with open(self.mapper.url2path(url), 'w'):
            pass
        self.assertTrue(self.mapper.exists(url))

    def test_get(self):
        """
        Make sure that proper content objects are returned (of the
        custom class, if defined).
        """
        url = self.gen_rand_str()
        self.assertIsNone(self.mapper.get(url))
        with open(self.mapper.url2path(url), 'w'):
            pass
        content = self.mapper.get(url)
        self.assertIsNotNone(content)
        self.assertIsInstance(content, Content)

        class CustomContent(Content):
            pass

        custommapper = Mapper(self.path, contentclass=CustomContent)
        content = custommapper.get(url)
        self.assertIsNotNone(content)
        self.assertIsInstance(content, CustomContent)

    def test_create(self):
        """
        Create only non existing files but necessary directories.
        """
        url = self.gen_rand_str()
        with open(self.mapper.url2path(url), 'w'):
            pass
        # Creating an already-existing URL must report failure.
        self.assertFalse(self.mapper.create(url))
        dirurl = self.gen_rand_str()
        deepurl = os.path.join(dirurl, url)
        self.mapper.create(deepurl)
        self.assertTrue(os.path.exists(os.path.join(self.path, dirurl)))
        superdirurl = self.gen_rand_str()
        deepdeepurl = os.path.join(superdirurl, dirurl, url)
        self.mapper.create(deepdeepurl)
        self.assertTrue(os.path.exists(os.path.join(self.path, superdirurl, dirurl)))

    def test_delete(self):
        """
        Make sure deletion works.
        """
        url = self.gen_rand_str()
        # Deleting a missing URL reports failure.
        self.assertFalse(self.mapper.delete(url))
        with open(self.mapper.url2path(url), 'w'):
            pass
        self.assertTrue(self.mapper.delete(url))
        self.assertFalse(os.path.exists(self.mapper.url2path(url)))
class ContentTestCase(BaseTestCase):
    # TODO: content-object tests have not been written yet; this placeholder
    # keeps the intended test-suite structure visible.
    pass
|
import os
from pathlib import Path
def build_manifest():
    """Create an empty MANIFEST.in in the current directory if absent.

    An existing MANIFEST.in is left untouched (including its mtime).
    """
    manifest = Path("MANIFEST.in")
    # Single pathlib object replaces the original os.path/str mix; the
    # explicit exists() check preserves "don't touch an existing file"
    # semantics (a bare touch() would bump the mtime).
    if not manifest.exists():
        manifest.touch()
def convert_answers_to_payload_0_0_2(answer_store, schema, routing_path):
    """
    Flatten questionnaire answers into the 0.0.2 payload format below

    'data': [
        {
            'value': 'Joe Bloggs',
            'answer_id': 'household-full-name',
            'group_instance': 0,
            'answer_instance': 0
        },
        {
            'value': 'Fred Flintstone',
            'answer_id': 'household-full-name',
            'group_instance': 0,
            'answer_instance': 1
        },
        {
            'value': 'Husband or wife',
            'answer_id': 'who-is-related',
            'group_instance': 0,
            'answer_instance': 0
        }
    ]

    For each block on the routing path, the schema supplies that block's
    answer ids and the store is filtered down to the matching answers.

    :param answer_store: questionnaire answers
    :param schema: questionnaire schema used to resolve answer ids per block
    :param routing_path: the path followed in the questionnaire
    :return: data in a formatted form
    """
    answers_per_location = (
        answer_store.filter(schema.get_answer_ids_for_block(location.block_id),
                            location.group_instance)
        for location in routing_path
    )
    return [answer for answers in answers_per_location for answer in answers]
|
from dataclasses import dataclass
from typing import Callable
from rxbp.init.initsubscription import init_subscription
from rxbp.mixins.flowablemixin import FlowableMixin
from rxbp.observables.fromsingleelementobservable import FromSingleElementObservable
from rxbp.subscriber import Subscriber
from rxbp.subscription import Subscription
from rxbp.typing import ElementType
@dataclass
class FromSingleElementFlowable(FlowableMixin):
    """Flowable that emits the element sequence produced lazily by `lazy_elem`."""

    # Deferred factory; evaluated only once the observable is subscribed.
    lazy_elem: Callable[[], ElementType]

    def unsafe_subscribe(self, subscriber: Subscriber) -> Subscription:
        """Build the single-element observable and wrap it in a subscription."""
        observable = FromSingleElementObservable(
            lazy_elem=self.lazy_elem,
            subscribe_scheduler=subscriber.subscribe_scheduler,
        )
        return init_subscription(observable=observable)
|
import pandas as pd
import numpy as np
import os
import re
import requests
import time
# ArcGIS REST service publishing Allegheny County child-nutrition sites.
source = 'https://services1.arcgis.com/vdNDkVykv9vEWFX4/arcgis/rest/services/Child_Nutrition/FeatureServer'
# Query layer 0 for every feature (where=1=1), returned as GeoJSON.
in_path = source + '/0/query?outFields=*&where=1%3D1&f=geojson'
out_dir = 'food-data/Cleaned_data_files'
out_path = os.path.join(out_dir, 'cleaned_summer_meal_sites_api.csv')
# Target schema for the cleaned CSV; columns not populated below remain NA.
final_cols = ['id', 'source_org', 'source_file', 'original_id', 'type', 'name', 'address', 'city',
              'state', 'zip_code', 'county', 'location_description', 'phone', 'url', 'latitude',
              'longitude', 'latlng_source', 'date_from', 'date_to', 'SNAP', 'WIC', 'FMNP',
              'fresh_produce', 'food_bucks', 'free_distribution', 'open_to_spec_group', 'data_issues']
### Summer Meal Sites ###
raw_dat = requests.get(in_path).json() #pd.read_csv(in_path, encoding = 'ansi')
get_entries = []
for i in range(len(raw_dat['features'])):
get_entries.append(pd.json_normalize(raw_dat['features'][i]['properties']))
df = pd.concat(get_entries).reset_index(drop = True)
# Filter down to Allegheny County sites only
df = df.loc[(df['Site_State'] == 'PA') & (df['Site_County'] == 'Allegheny')].reset_index()
# Assign some columns to schema fields
df['original_id'] = df['Site_ID_External']
df['name'] = df['Site_Name'].str.title() # deal with ALL CAPS
df['city'] = df['Site_City'].str.title()
df['state'] = df['Site_State']
df['zip_code'] = df['Site_Zip'].str.zfill(5)
df['address'] = df['Site_Street']
df['county'] = df['Site_County']
df['latitude'] = df['Latitude']
df['longitude'] = df['Longitude']
df['date_from'] = df['Start_Date'].apply(lambda x: time.strftime('%B %d %Y', time.localtime(x/1000)) if x != None else None)
df['date_to'] = df['End_Date'].apply(lambda x: time.strftime('%B %d %Y', time.localtime(x/1000)) if x != None else None)
## add into to location description
df["location_description"] = None
for row in range(len(df)):
res = ''
if df['Site_Street2'][row] != '':
res = res + 'Site Location Info: ' + df['Site_Street2'][row] +'; '
if df['Service_Type'][row] != '':
res = res + ' Service Type: ' + df['Service_Type'][row] +'; '
if df['Site_Hours'][row] != '':
res = res + 'Site Hours: ' + df['Site_Hours'][row] + '; '
if df['Comments'][row] != '':
res = res + 'Comments: ' + df['Comments'][row] + '; '
if df['Site_Instructions'][row] != '':
res = res + 'Site Instructions: ' + df['Site_Instructions'][row]
df['location_description'][row] = res
# Set some fields directly
df['source_org'] = 'Allegheny County'
df['source_file'] = source #os.path.basename(in_path)
df['type'] = "summer meal site"
df['latlng_source'] = df['source_org']
df['SNAP'] = 0
df['WIC'] = 0
df['FMNP'] = 0
# df['fresh_produce'] = 1
df['food_bucks'] = 0
df['free_distribution'] = 1
df['open_to_spec_group'] = 'children and teens 18 and younger'
df['data_issues'] = '' # start with blank field, to populate later
# Reorder and add any missing columns
df = df.reindex(columns = final_cols)
# Identify which columns we have handled
handled_cols = df.columns[~df.isna().all()] # i.e. columns that aren't all NA
# Detect and document missingness in handled columns
for col in handled_cols:
df.loc[df[col].isna(), 'data_issues'] += '{} missing;'.format(col)
# Detect some specific data issues
# df.loc[((df['latitude'] == 0) & (df['longitude'] == 0)), 'data_issues'] += 'latlng is (0,0);'
# Write out to CSV
df.to_csv(out_path, index = False)
|
'''define the config file for cityscapes and bisenetv2fp16'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
    'type': 'cityscapes',
    'rootdir': os.path.join(os.getcwd(), 'CityScapes'),
})
# Train-time augmentations, applied in order.
DATASET_CFG['train']['aug_opts'] = [
    ('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
    ('RandomCrop', {'crop_size': (1024, 1024), 'one_category_max_ratio': 0.75}),
    ('RandomFlip', {'flip_prob': 0.5}),
    ('PhotoMetricDistortion', {}),
    ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
    ('ToTensor', {}),
    ('Padding', {'output_size': (1024, 1024), 'data_type': 'tensor'}),
]
# Test-time: deterministic resize + normalise only.
DATASET_CFG['test']['aug_opts'] = [
    ('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': None}),
    ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
    ('ToTensor', {}),
]
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
    {
        'type': 'sgd',
        'sgd': {
            'learning_rate': 0.05,
            'momentum': 0.9,
            'weight_decay': 5e-4,
            'min_lr': 1e-4,
        },
        'max_epochs': 860,
        'policy': {
            'type': 'poly',
            # NOTE(review): the None entries are presumably filled in by the
            # training runner once the dataset size is known -- confirm.
            'opts': {'power': 0.9, 'max_iters': None, 'num_iters': None, 'num_epochs': None},
            'warmup': {'type': 'linear', 'ratio': 0.1, 'iters': 1000},
        },
    }
)
# modify losses config
# Four auxiliary heads plus the main classification head, all plain
# cross-entropy ignoring label 255 (unlabelled).
LOSSES_CFG = {
    'loss_aux1': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_aux2': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_aux3': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_aux4': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_cls': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
}
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
    {
        # 19 Cityscapes evaluation classes.
        'num_classes': 19,
        # Mixed precision enabled (apex-style O1 opt level).
        'fp16': {'is_on': True, 'opts': {'opt_level': 'O1'}},
        'backbone': {
            'type': None,
            'series': 'bisenetv2',
            'pretrained': False,
            'selected_indices': (0, 1, 2, 3, 4),
        },
        'decoder': {
            'in_channels': 128,
            'out_channels': 1024,
            'dropout': 0.1,
            'num_convs': 1,
        },
        # One auxiliary decoder per backbone stage feeding the aux losses.
        'auxiliary': [
            {'in_channels': 16, 'out_channels': 16, 'dropout': 0.1, 'num_convs': 2},
            {'in_channels': 32, 'out_channels': 64, 'dropout': 0.1, 'num_convs': 2},
            {'in_channels': 64, 'out_channels': 256, 'dropout': 0.1, 'num_convs': 2},
            {'in_channels': 128, 'out_channels': 1024, 'dropout': 0.1, 'num_convs': 2},
        ],
    }
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
    {
        'backupdir': 'fcn_bisenetv2fp16_cityscapes_train',
        'logfilepath': 'fcn_bisenetv2fp16_cityscapes_train/train.log',
    }
)
COMMON_CFG['test'].update(
    {
        'backupdir': 'fcn_bisenetv2fp16_cityscapes_test',
        'logfilepath': 'fcn_bisenetv2fp16_cityscapes_test/test.log',
        'resultsavepath': 'fcn_bisenetv2fp16_cityscapes_test/fcn_bisenetv2fp16_cityscapes_results.pkl'
    }
)
# import datetime
# import os
# import unittest
# from decompy.DataGathering.FileGetter import FileGetter
# import shutil
# import json
#
#
# class GitHubScraperTest(unittest.TestCase):
#
# def test_repo_vc_1_download_config_META_create_and_update(self):
# """
# Tests if the repo.json download time timestamp is approximately correct when appending
# :return: nothing
# """
#
# repo = "Decompy_valid_and_compilable_1"
# repo_json = os.path.join(repo, "repo.json")
# url = "https://github.com/DecomPy/valid_and_compilable_1"
#
# FileGetter.download_all_files(url)
#
# # make dir and file
# if not os.path.exists(repo):
# os.mkdir(repo)
#
# # write new values
# with open(repo_json, "w+") as json_file:
# dump_me = {
# "name": repo,
# "author": "some guy",
# "url": url,
# "master_download_date": int(datetime.datetime.today().strftime('%M'))
# }
# json.dump(dump_me, json_file)
#
# # read values and confirm
# with open(repo_json, "r") as json_file:
# json_data = json.load(json_file)
#
# file_minute = json_data["master_download_date"]
# minute = datetime.datetime.today().strftime('%M')
#
# minute = int(minute)
# json_data["master_download_date"] = minute
#
# self.assertTrue(file_minute == minute or file_minute == ((minute + 1) % 60)
# or file_minute == ((minute - 1) % 60))
#
# # write values back then confirm
# with open(repo_json, "w+") as json_file:
# json.dump(json_data, json_file)
#
# # final confirmation
# with open(repo_json, "r") as json_file:
# json_data = json.load(json_file)
#
# file_minute = json_data["master_download_date"]
# minute = datetime.datetime.today().strftime('%M')
#
# minute = int(minute)
# json_data["master_download_date"] = minute
#
# self.assertTrue(file_minute == minute or file_minute == ((minute + 1) % 60)
# or file_minute == ((minute - 1) % 60))
#
# # Makes sure the directory is always clean
# if os.path.exists(repo):
# shutil.rmtree(repo)
#
# def test_repo_iu_1_doesnt_download(self):
# """
# Tests that a repository that is not supposed to be downloaded are not downloaded
# :return: nothing
# """
# FileGetter.download_all_files("https://github.com/DecomPy/invalid_and_uncompilable_1")
#
# file_count = 0
# for root, dirs, files in os.walk("Decompy_invalid_and_uncompilable_1/Unfiltered"):
# for _ in files:
# file_count += 1
# print(file_count)
# self.assertTrue(file_count == 0)
#
# if os.path.exists("Decompy_invalid_and_uncompilable_1"):
# shutil.rmtree("Decompy_invalid_and_uncompilable_1")
#
# def test_repo_vc_1_does_download(self):
# """
# Tests that a repository with valid files are downloaded
# :return: nothing
# """
#
# FileGetter.download_all_files("https://github.com/DecomPy/valid_and_compilable_1")
# file_count = 0
# for root, dirs, files in os.walk("Decompy_valid_and_compilable_1/Unfiltered"):
# for _ in files:
# file_count += 1
# print(file_count)
#
# # I have no idea why the following line doesn't work on TravisCI
# # self.assertTrue(file_count == 2)
#
# # Makes sure the directory is always clean
# if os.path.exists("DecomPy_valid_and_compilable_1"):
# shutil.rmtree("DecomPy_valid_and_compilable_1")
#
# def test_repo_vc_1_does_download_custom_directory(self):
# """
# Tests that a repository with valid files are downloaded into a specified directory
# :return: nothing
# """
#
# FileGetter.download_all_files("https://github.com/DecomPy/valid_and_compilable_1", "test_dir")
# file_count = 0
# for root, dirs, files in os.walk("test_dir/Unfiltered"):
# for _ in files:
# file_count += 1
# print(file_count)
# self.assertTrue(file_count == 2)
#
# # Makes sure the directory is always clean
# if os.path.exists("test_dir"):
# shutil.rmtree("test_dir")
#
# def test_UTF_in_file_name(self):
# """
# Tests that getting files that have UTF characters doesn't crash the program
# :return:
# """
#
# # This will cause a problem if unable to handle UTF characters like 解
# FileGetter.download_all_files("https://github.com/swiftchao/mzzopublic")
#
# # Clean up directory
# if os.path.exists("swiftchao_mzzopublic"):
# shutil.rmtree("swiftchao_mzzopublic")
#
# @classmethod
# def setUp(cls):
# """
# Clean up directory before running any test
# :return:
# """
#
# if os.path.exists("DecomPy_valid_and_compilable_1"):
# shutil.rmtree("DecomPy_valid_and_compilable_1")
#
# @classmethod
# def tearDown(cls):
# """
# Cleans up directory after running all tests
# :return: nothing
# """
# if os.path.exists("DecomPy_valid_and_compilable_1"):
# shutil.rmtree("DecomPy_valid_and_compilable_1")
# if os.path.exists("DecomPy_invalid_and_uncompilable_1"):
# shutil.rmtree("DecomPy_invalid_and_uncompilable_1")
# if os.path.isfile("DecomPy_valid_and_compilable_1repo.json"): # For when running test from linux system
# os.remove("DecomPy_valid_and_compilable_1repo.json")
#
#
# if __name__ == '__main__':
# unittest.main()
|
from rltorch.algs.PPOLSTM.agent import PPOLSTMAgent
if __name__ == "__main__":
    import argparse

    # (flag, type, default) for every hyperparameter exposed on the CLI.
    cli_options = [
        ("--env_name", str, 'CartPole-v0'),
        ("--hidden_size", int, 32),
        ("--actor_lr", float, 3e-4),
        ("--critic_lr", float, 1e-3),
        ("--epoch_steps", int, 4000),
        ("--epochs", int, 50),
        ("--max_ep_len", int, 1000),
        ("--gamma", float, 0.99),
        ("--lam", float, 0.97),
        ("--clip_ratio", float, 0.2),
        ("--target_kl", float, 0.01),
        ("--train_actor_iters", int, 80),
        ("--train_critic_iters", int, 80),
        ("--seed", int, 0),
        ("--model_save_freq", int, 50),
    ]
    parser = argparse.ArgumentParser()
    for flag, option_type, default in cli_options:
        parser.add_argument(flag, type=option_type, default=default)

    # Forward every CLI option straight into the agent and start training.
    agent = PPOLSTMAgent(**vars(parser.parse_args()))
    agent.train()
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from .composition_parts import Component
from .composition_parts import Identifier
from .make_copy import make_copy
class MakeCopyTest(unittest.TestCase):
    """Tests for make_copy, the deep-copy helper for IDL composition parts."""
    def test_primitives(self):
        # Copies of immutable primitives compare equal to the originals.
        self.assertEqual(None, make_copy(None))
        self.assertEqual(True, make_copy(True))
        self.assertEqual(False, make_copy(False))
        self.assertEqual(42, make_copy(42))
        self.assertEqual(3.14, make_copy(3.14))
        self.assertEqual('abc', make_copy('abc'))
    def test_primitives_subclasses(self):
        # Identifier and Component are subclasses of str. Copies of them
        # shouldn't change their type.
        original = (Identifier('x'), Component('x'))
        copy = make_copy(original)
        self.assertEqual(original[0], copy[0])
        self.assertEqual(original[1], copy[1])
        self.assertIsInstance(copy[0], Identifier)
        self.assertIsInstance(copy[1], Component)
    def test_object_identity(self):
        # A diamond structure must be preserved when making a copy.
        #    /--> B --\
        # A --> D
        #    \--> C --/
        # A1->B1, A1->C1, B1->D1, C1->D1 will be copied as;
        # A2->B2, A2->C2, B2->D2, C2->D2 where X2 is a copy of X1.
        class Obj(object):
            pass
        class Ref(object):
            def __init__(self, value=None):
                self.value = value
        obj = Obj()
        ref1 = Ref(obj)
        ref2 = Ref(obj)
        # Two distinct Ref instances share a single target before the copy...
        self.assertNotEqual(ref1, ref2)
        self.assertIs(ref1.value, ref2.value)
        copy = make_copy((ref1, ref2))
        self.assertIsInstance(copy, tuple)
        self.assertIsInstance(copy[0], Ref)
        self.assertIsInstance(copy[1], Ref)
        # ...and must still share one (copied) target afterwards.
        self.assertIsNot(copy[0], copy[1])
        self.assertIs(copy[0].value, copy[1].value)
|
import discord
from discord.ext import commands
import random
class EightBall(commands.Cog):
    """Magic 8-ball command cog, with guards against CTF-flag leaks in chat."""
    def __init__(self, client):
        self.client = client
    @commands.command(aliases=['8ball', '8Ball'])
    async def _8ball(self, ctx, *, question):
        """Answer `question` with a random canned 8-ball response.

        Messages that look like flag leaks ('.flag' command output or a
        'csictf{' flag string) are deleted instead of answered.
        """
        # The twenty classic Magic 8-Ball responses.
        responses =['It is certain.','It is decidedly so.','Without a doubt.',
                    'Yes – definitely.','You may rely on it.','As I see it, yes.',
                    'Most likely.','Outlook good.','Yes.',
                    'Signs point to yes.',' Reply hazy, try again.','Ask again later.',
                    ' Better not tell you now.',' Cannot predict now.','Concentrate and ask again.',
                    'Don\'t count on it.',' My reply is no.',' My sources say no.',
                    'Outlook not so good.','Very doubtful.']
        # purge(limit=1) deletes the invoking message itself.
        if '.flag' in question:
            await ctx.channel.purge(limit=1)
            return
        if 'csictf{' in question:
            await ctx.channel.purge(limit=1)
            await ctx.send('Oh no no! Don\'t post flags here.')
        else:
            await ctx.send(f'Question: {question}\nAnswer: {random.choice(responses)}')
def setup(client):
    """Extension entry point: register the EightBall cog with the bot."""
    cog = EightBall(client)
    client.add_cog(cog)
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from std_msgs.msg import ColorRGBA
class RobotShapesContainer:
    """Tracks each robot's shape and last known position and renders them
    as a list of visualization_msgs Markers."""

    def __init__(self):
        self.shapes = {}        # robot id -> shape object (must provide toMarker)
        self.positions = {}     # robot id -> position tuple
        self.last_markers = {}  # marker.id -> Marker from the last toMarkerArray call

    def robot_ids(self):
        """Return the ids of all registered robots."""
        # list(dict) copies the keys and avoids shadowing the `id` builtin
        # as the original comprehension did.
        return list(self.shapes)

    def setRobotShape(self, _robot_id, _shape):
        """Register `_shape` for the robot and reset its position to origin."""
        self.shapes[_robot_id] = _shape
        self.positions[_robot_id] = (0, 0, 0)

    def updateRobotPosition(self, _robot_id, _position):
        """Record the robot's latest position.

        BUG FIX: removed the leftover debug print that spammed stdout on
        every position update.
        """
        self.positions[_robot_id] = _position

    def toMarkerArray(self, colors, frame_id="map"):
        """Build one Marker per robot (via each shape's toMarker).

        Also caches the markers in `last_markers`, keyed by marker id.
        """
        markers = []
        for rid, shape in self.shapes.items():
            markers.append(shape.toMarker(self.positions[rid], frame_id, colors[rid]))
        self.last_markers = {}
        for marker in markers:
            self.last_markers[marker.id] = marker
        return markers
import subprocess
import unittest
from threading import Timer
class ApplicationTest(unittest.TestCase):
    """End-to-end test driving the task_list console app over real pipes.

    Protocol: each `execute` first consumes the "> " prompt, then writes a
    command; `read`/`read_lines` assert the exact bytes printed next, so
    output order matters.
    """
    # Prompt string the app prints before reading each command.
    PROMPT = "> "
    # Watchdog timeout in seconds; the subprocess is killed if a read hangs.
    TIMEOUT = 2
    def setUp(self):
        # Launch the app as a real subprocess speaking text over stdin/stdout.
        self.proc = subprocess.Popen(
            ["python", "-m", "task_list"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True,
        )
        # Kill the child if the test blocks waiting on output.
        self.timer = Timer(self.TIMEOUT, self.proc.kill)
        self.timer.start()
    def tearDown(self):
        self.timer.cancel()
        self.proc.stdout.close()
        self.proc.stdin.close()
        # Busy-poll until the child has actually exited.
        while self.proc.returncode is None:
            self.proc.poll()
    def test_it_works(self):
        # Build two projects with tasks, check a few off, and verify the
        # exact "view by project" rendering each time.
        self.execute("view by project")
        self.execute("add project secrets")
        self.execute("add task secrets 1 Eat more donuts.")
        self.execute("add task secrets 2 Destroy all humans.")
        self.execute("view by project")
        self.read_lines(
            "secrets", "    [ ] 1: Eat more donuts.", "    [ ] 2: Destroy all humans.", ""
        )
        self.execute("add project training")
        self.execute("add task training 3 Four Elements of Simple Design")
        self.execute("add task training 4 SOLID")
        self.execute("add task training 5 Coupling and Cohesion")
        self.execute("add task training 6 Primitive Obsession")
        self.execute("add task training 7 Outside-In TDD")
        self.execute("add task training 8 Interaction-Driven Design")
        self.execute("check 1")
        self.execute("check 3")
        self.execute("check 5")
        self.execute("check 6")
        self.execute("view by project")
        self.read_lines(
            "secrets",
            "    [x] 1: Eat more donuts.",
            "    [ ] 2: Destroy all humans.",
            "",
            "training",
            "    [x] 3: Four Elements of Simple Design",
            "    [ ] 4: SOLID",
            "    [x] 5: Coupling and Cohesion",
            "    [x] 6: Primitive Obsession",
            "    [ ] 7: Outside-In TDD",
            "    [ ] 8: Interaction-Driven Design",
            "",
        )
        self.execute("quit")
    def execute(self, command):
        # Send one command line to the app.
        self.execute_write(command) if False else self.write(command + "\n")
    def write(self, command):
        # Wait for the prompt first so writes stay in lock-step with reads.
        self.read(self.PROMPT)
        self.proc.stdin.write(command)
        self.proc.stdin.flush()
    def read(self, expected_output):
        # Read exactly len(expected_output) chars and compare.
        output = self.proc.stdout.read(len(expected_output))
        self.assertEqual(expected_output, output)
    def read_lines(self, *lines):
        for line in lines:
            self.read(line + "\n")
|
# -*- coding: utf-8 -*-
"""
py_vollib.ref_python.black.implied_volatility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A library for option pricing, implied volatility, and
greek calculation. py_vollib is based on lets_be_rational,
a Python wrapper for LetsBeRational by Peter Jaeckel as
described below.
:copyright: © 2017 Gammon Capital LLC
:license: MIT, see LICENSE for more details.
py_vollib.ref_python is a pure python version of py_vollib without any dependence on LetsBeRational. It is provided purely as a reference implementation for sanity checking. It is not recommended for industrial use.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""
# -----------------------------------------------------------------------------
# IMPORTS
# Standard library imports
# Related third party imports
from scipy.optimize import brentq
# Local application/library specific imports
from py_vollib.ref_python.black import black
# -----------------------------------------------------------------------------
# FUNCTIONS - IMPLIED VOLATILITY
def implied_volatility(price, F, K, r, t, flag):
    """Return the Black implied volatility of an option.

    Solves for the volatility at which the Black (1976) futures-option
    model reproduces the observed `price`, using Brent's root finder.
    (The original docstring wrongly said this returns the Black delta.)

    :param price: observed (discounted) option price
    :type price: float
    :param F: underlying futures price
    :type F: float
    :param K: strike price
    :type K: float
    :param r: annual risk-free interest rate
    :type r: float
    :param t: time to expiration in years
    :type t: float
    :param flag: 'c' or 'p' for call or put.
    :type flag: str
    :returns: float

    >>> F = 101.0
    >>> K = 102.0
    >>> t = .5
    >>> r = .01
    >>> flag = 'p'
    >>> sigma_in = 0.2
    >>> price = black(flag, F, K, t, r, sigma_in)
    >>> expected_price = 6.20451158097
    >>> abs(expected_price - price) < 0.00001
    True
    >>> sigma_out = implied_volatility(price, F, K, r, t, flag)
    >>> sigma_in == sigma_out or abs(sigma_in - sigma_out) < 0.00001
    True

    >>> F = 100
    >>> K = 100
    >>> sigma = .2
    >>> flag = 'c'
    >>> t = .5
    >>> r = .02
    >>> discounted_call_price = black(flag, F, K, t, r, sigma)
    >>> iv = implied_volatility(discounted_call_price, F, K, r, t, flag)
    >>> expected_discounted_call_price = 5.5811067246
    >>> expected_iv = 0.2
    >>> abs(expected_discounted_call_price - discounted_call_price) < 0.00001
    True
    >>> abs(expected_iv - iv) < 0.00001
    True
    """
    # Named inner function rather than a lambda bound to a name (PEP 8
    # E731); its root is the sigma that reprices the option.
    def objective(sigma):
        return price - black(flag, F, K, t, r, sigma)

    # The bracket [1e-12, 100] comfortably spans any realistic volatility.
    return brentq(
        objective,
        a=1e-12,
        b=100,
        xtol=1e-15,
        rtol=1e-15,
        maxiter=1000,
        full_output=False
    )
if __name__ == "__main__":
    from py_vollib.helpers.doctest_helper import run_doctest
    # Execute the doctests embedded in this module's docstrings.
    run_doctest()
|
import pygame
from model.Bar import Bar
from model.BarVertical import BarVertical
class BarRight(BarVertical):
    """Right-hand paddle: driven by the up/down arrow keys, bounces the
    ball back toward the left side."""
    def __init__(self, screen_size, max_speed):
        BarVertical.__init__(self, screen_size, max_speed)
        self.surface = pygame.image.load('imgs/bar_right.png')
        self.rect = self.surface.get_rect()
        self.init_position()
    def init_position(self):
        # Flush against the right edge, vertically centred.
        self.rect.left = self.screen_size[0] - self.rect.width
        self.rect.top = (self.screen_size[1] - self.rect.height) / 2
    def on_event(self, event):
        """Translate arrow-key presses/releases into a movement status.

        `last_key` remembers the most recent press so that releasing the
        *other* key does not cancel a movement that just started.
        """
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_UP:
                self.status = Bar.NEGATIVE
                self.last_key = pygame.K_UP
            if event.key == pygame.K_DOWN:
                self.status = Bar.POSITIVE
                self.last_key = pygame.K_DOWN
        elif event.type == pygame.KEYUP:
            # Only stop if the released key is the one that caused movement.
            if (self.last_key == pygame.K_UP and event.key == pygame.K_UP) or (
                    self.last_key == pygame.K_DOWN and event.key == pygame.K_DOWN):
                self.status = Bar.IDLE
    def check_boundary(self):
        # Clamp within the play field; the margin equals the bar's width --
        # presumably matching the wall thickness, TODO confirm.
        if self.rect.top < self.rect.width:
            self.rect.top = self.rect.width
            self.status = Bar.IDLE
        if self.rect.bottom > self.screen_size[1] - self.rect.width:
            self.rect.bottom = self.screen_size[1] - self.rect.width
            self.status = Bar.IDLE
    def check_collision(self, ball):
        # On contact, force the ball leftwards and add "spin" proportional
        # to the bar's own vertical speed.
        if self.rect.colliderect(ball.rect):
            ball.speed[0] = -abs(ball.speed[0])
            ball.speed[1] += self.speed * 0.1
|
# -*- coding: utf-8 -*-
"""Build survey_AMT.csv by pairing triples from the two halves of the sample."""
import pandas as pd
import csv

# Pass the path directly (the original opened a file handle and never closed it).
dataset = pd.read_csv('survey_sample.csv')
# Vectorised triple construction. The original used DataFrame.set_value per
# row, which was deprecated in pandas 0.21 and removed in 1.0.
dataset['triple'] = ('<' + dataset['question_entity_label'] + '> <' +
                     dataset['question_relation_label'] + '> <' +
                     dataset['answer'] + '>')
dataset = dataset.drop(['index', 'answer_sentence', 'answer',
                        'question_entity_label', 'question_relation_label'],
                       axis=1)
dataset.rename(columns={'question_id': 'UID'}, inplace=True)
print(dataset.shape)

# Split into halves; .copy() avoids SettingWithCopyWarning on the renames.
half = len(dataset.index) // 2
tab1 = dataset.iloc[:half].copy()
tab1.rename(columns={'UID': 'UID1', 'triple': 'Triple1'}, inplace=True)
tab1 = tab1.reset_index(drop=True)
print(tab1.head(3))
tab2 = dataset.iloc[half:].copy()
tab2.rename(columns={'UID': 'UID2', 'triple': 'Triple2'}, inplace=True)
tab2 = tab2.reset_index(drop=True)
print(tab2.head(3))

# Side-by-side join: row i of tab1 pairs with row i of tab2.
final_tab = tab1.join(tab2)
print(list(final_tab.columns))
final_tab.to_csv('survey_AMT.csv', index_label='index', encoding='utf-8',
                 quoting=csv.QUOTE_ALL)
import json
import pkgutil
import os
import logging
import logging.config
import yaml
# Module-wide constants for the Zscaler (ZIA) API client.
# (The original module-level ``global`` statement was a no-op and is removed.)
DEBUG_DEFAULT = False     # default debug verbosity
MAX_FQDN_LEN = 255        # maximum fully-qualified domain name length
# Admin-console base URL for each Zscaler cloud.
Z_CLOUDS = {
    'zscaler': 'https://admin.zscaler.net/',
    'zscloud': 'https://admin.zscloud.net/',
    'zscalerone': 'https://admin.zscalerone.net/',
    'zscalertwo': 'https://admin.zscalertwo.net/',
    'zscalerthree': 'https://admin.zscalerthree.net/',
    'betacloud': 'https://admin.zscalerbeta.net/'
}
USER_CONFIG_FILE = './config.yaml'
# Cached parsed configuration; populated lazily by load_config().  The
# original never initialized this, so get_config() could raise NameError.
_CONFIG = None
def load_config():
    """Load packaged defaults, replace them with ./config.yaml if present,
    and apply any 'log' section to the logging system."""
    global _CONFIG
    packaged = pkgutil.get_data(__package__, 'data/config.yaml')
    _CONFIG = yaml.safe_load(packaged.decode('utf-8'))
    if os.path.exists(USER_CONFIG_FILE):
        # A user config fully replaces the packaged defaults (no merging).
        with open(USER_CONFIG_FILE) as configfile:
            _CONFIG = yaml.safe_load(configfile.read())
    if 'log' in _CONFIG:
        logging.config.dictConfig(_CONFIG['log'])
def get_config():
    """Return the parsed configuration, loading it lazily on first access.

    Fix: the original read the module global ``_CONFIG`` before it was ever
    assigned, so a first call raised ``NameError`` instead of triggering the
    lazy load.  ``globals().get`` handles both the unset and ``None`` cases.
    """
    if globals().get('_CONFIG') is None:
        load_config()
    return _CONFIG
class RequestError(Exception):
    """An API request failure, carrying the request context and server error.

    Attributes:
        method:  HTTP method of the failed request.
        path:    request path.
        body:    request body (may be None).
        code:    error code reported by the server.
        message: error message reported by the server.
    """

    def __init__(self, method, path, body, error):
        # Keep the full request context so callers can log or retry it.
        self.method, self.path, self.body = method, path, body
        # Unpack the server-supplied error dict.
        self.code, self.message = error['code'], error['message']
class SessionTimeoutError(RequestError):
    """RequestError raised when the API session has timed out."""
class AuthenticationError(RequestError):
    """RequestError raised when authentication with the API fails."""
class ZiaApiBase(object):
    """Base class for ZIA API wrappers; holds the shared session object."""

    def __init__(self, session):
        # The session is used by subclasses to issue authenticated requests.
        self._session = session
|
from django import forms
from equipment.models import Item
class CreateItemForm(forms.ModelForm):
    """ModelForm for creating an equipment ``Item`` record."""
    class Meta:
        # Explicit whitelist of the Item fields exposed on the form.
        model = Item
        fields = [
            "kind",
            "person",
            "code",
            "brand",
            "specifications",
            "series_number",
            "state",
            "registered_date",
            "return_date",
        ]
# Code taken from: https://explore-flask.readthedocs.io/en/latest/views.html
# Specify part of the URL to be converted into Python List of Integer
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
    """URL converter mapping a '+'-joined segment (e.g. '1+2+3') to [1, 2, 3]."""

    def to_python(self, value):
        """Convert the matched URL fragment into a list of ints."""
        return list(map(int, value.split('+')))

    def to_url(self, values):
        """Serialise a list back into the '+'-joined URL form.

        Bug fix: the original called ``BaseConverter.to_url(value)`` without
        passing ``self``, so each element was (mis)used as the converter
        instance instead of the value being quoted.
        """
        return '+'.join(BaseConverter.to_url(self, value)
                        for value in values)
|
from flask import render_template, request
def index():
    """Render the landing page, pre-filling the email from the cookie."""
    email = request.cookies.get('email')
    return render_template('index.html', email=email)
|
# -*- coding: utf-8 -*-
def includeme(config):
    """Wire the NIPSA feature into the application configuration."""
    # Subscriber that writes nipsa fields into annotations on save.
    subscriber = 'h.nipsa.subscribers.transform_annotation'
    event_iface = 'h.events.AnnotationTransformEvent'
    config.add_subscriber(subscriber, event_iface)
    # Extra filter for the API search module.
    config.memex_add_search_filter('h.nipsa.search.Filter')
|
c = 0
# NOTE(review): the original body was a syntax error:
#     if 0==i%z: for z in range(3,i-1))
# Reconstructed intent (from the commented-out draft above it): print each
# i in [1, 9] that is divisible by some z with 3 <= z <= i - 2.  Confirm
# against the author's intent before relying on this.
for i in range(1, 10):
    if any(i % z == 0 for z in range(3, i - 1)):
        print(i)
|
"""
Usage example for the decode_video python op.
"""
from __future__ import absolute_import
from __future__ import print_function
import time
import argparse
import numpy as np
import tensorflow as tf
from py_ops import decode_video
def _parse_arguments(argv=None):
    """Parse command-line flags for the decode_video demo.

    Args:
        argv: Optional list of argument strings.  Defaults to ``None``, which
            makes argparse read ``sys.argv[1:]`` — backward compatible with
            the original no-argument call, but now also testable.

    Returns:
        argparse.Namespace with ``input_file``, ``output_file``,
        ``play_video``, ``num_frames``, ``fps`` and ``random_chunks``.
    """
    parser = argparse.ArgumentParser('Test decode_video python op.')
    parser.add_argument('--input_file', help='Path to the video file.')
    parser.add_argument('--output_file', default=None,
                        help='(Optional) Path to the .npy file where the decoded frames will be stored.')
    parser.add_argument('--play_video', default=False, action='store_true',
                        help='Play the extracted frames.')
    parser.add_argument('--num_frames', default=30, type=int,
                        help='Number of frames per video (sequence length). Set to 0 for full video.')
    parser.add_argument('--fps', default=-1, type=int,
                        help='Framerate to which the input videos are converted. Use -1 for the original framerate.')
    parser.add_argument('--random_chunks', default=False, action='store_true',
                        help='Grab video frames starting from a random position.')
    return parser.parse_args(argv)
def _show_video(video, fps=10):
    """Play decoded frames with matplotlib (best-effort; errors are ignored).

    Args:
        video: array of shape (frames, height, width, channels) with values
            in [0, 255] — assumed float or int; integer input now works too.
        fps: playback frame rate; negative values fall back to 25.
    """
    # Import matplotlib/pylab only if needed
    import matplotlib
    matplotlib.use('TkAgg')
    import matplotlib.pylab as pl
    pl.style.use('ggplot')
    pl.axis('off')
    if fps < 0:
        fps = 25
    # Pylab works in the [0, 1] range.  Fix: use an out-of-place division —
    # the original in-place `video /= 255.` mutated the caller's array and
    # raises a TypeError on integer dtypes.
    video = video / 255.
    img = None
    pause_length = 1. / fps
    try:
        for f in range(video.shape[0]):
            im = video[f, :, :, :]
            if img is None:
                img = pl.imshow(im)
            else:
                img.set_data(im)
            pl.pause(pause_length)
            pl.draw()
    except Exception:
        # Best-effort playback: swallow backend errors (e.g. window closed),
        # but no longer trap KeyboardInterrupt/SystemExit like the original
        # bare `except:` did.
        pass
if __name__ == '__main__':
    args = _parse_arguments()
    # TF1-style usage: build the graph once, feed the filename at run time.
    sess = tf.Session()
    f = tf.placeholder(tf.string)
    video, h, w, seq_length = decode_video(f, args.num_frames, args.fps, args.random_chunks)
    start_time = time.time()
    # Decode the whole video in a single session run and time it.
    frames, seq_length_val = sess.run([video, seq_length], feed_dict={f: args.input_file})
    total_time = time.time() - start_time
    print('\nSuccessfully loaded video!\n'
          '\tDimensions: %s\n'
          '\tTime: %.3fs\n'
          '\tLoaded frames: %d\n' %
          (str(frames.shape), total_time, seq_length_val))
    if args.output_file:
        # Optionally persist the decoded frames for later reuse.
        np.save(args.output_file, frames)
        print("Stored frames to %s" % args.output_file)
    if args.play_video:
        _show_video(frames, args.fps)
|
# Copyright 2020 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define ONNX graph."""
from typing import Dict, NoReturn
from mindinsight.mindconverter.common.log import logger as log
from .base import Graph
from .input_node import InputNode
from .onnx_graph_node import OnnxGraphNode
from .tf_graph_parser import TFGraphParser
from .onnx_utils import OnnxDataLoader
# ONNX op types (prefixed "onnx::") that map straight to an op name carrying
# no scope information when scope names are generated for converted nodes.
NONE_SCOPE_OP = {
    "onnx::Add": "Add",
    "onnx::Flatten": "Flatten",
    "onnx::Concat": "Concat",
    "onnx::Squeeze": "Squeeze",
    "onnx::Unsqueeze": "Unsqueeze",
}
def normalize_node_name(node):
    """
    Strip the trailing output index (e.g. ``:0``) from an ONNX node name.

    Args:
        node (Node, str): ONNX node instance or node name string.

    Returns:
        str, normalized node name.
    """
    name = node if isinstance(node, str) else node.name
    return name.split(':')[0]
class OnnxGraph(Graph):
    """
    Define ONNX graph.
    Args:
        model (onnx.ModelProto): Onnx defined model proto.
        sample_shape (tuple): Input shape of the model.
    """
    def __init__(self, model, sample_shape: tuple = None, **kwargs):
        super(OnnxGraph, self).__init__(model=model, **kwargs)
        # Construct nodes, weights and edges immediately from the proto.
        self.build(sample_shape)
    @staticmethod
    def _extract_shape(shape):
        """
        Extract shape from string-type shape.
        Args:
            shape (str): Shape value in string-type.
        Returns:
            list, shape.
        """
        # A string without a comma cannot describe a multi-dim shape here.
        if "," not in shape:
            return []
        shape_arr = []
        for s in shape.split(","):
            s = s.strip()
            if not s:
                return []
            if ":" in s:
                # Keep only the value before an annotation (e.g. "2:batch").
                s = s.split(":")[0]
            s = s.replace("!", "")
            if not s.isdigit():
                # Symbolic/unknown dimension: the whole shape is unusable.
                return []
            shape_arr.append(int(s))
        return shape_arr
    def _build_connection(self, src, tgt) -> NoReturn:
        """
        Build connection between source node and target node.
        Args:
            src (str): Source node name.
            tgt (str): Target node name.
        """
        # If src and tgt are the same node, src not in node_collection or
        # tgt not in node_collection, then skip this edge.
        src = normalize_node_name(src)
        tgt = normalize_node_name(tgt)
        if src == tgt or src not in self._nodes_collection or tgt not in self._nodes_collection:
            if src.split(':')[0] not in self._nodes_collection:
                log.warning(
                    "Graph construct a self-loop node %s. Ignored.", src)
            return
        # NOTE(review): src/tgt were already normalized above, so the extra
        # .split(':')[0] calls below look redundant but are kept as-is.
        if tgt not in self._nodes_collection[src.split(':')[0]].successor_nodes:
            self._nodes_collection[src.split(':')[0]].successor_nodes.append(tgt)
        if src not in self._nodes_collection[tgt].precursor_nodes:
            self._nodes_collection[tgt.split(':')[0]].precursor_nodes.append(src)
    def build(self, input_shape=None):
        """
        Build graph tree.
        Args:
            input_shape (tuple): Input shape of model. Default: None
        """
        model_data = OnnxDataLoader(self.model, graph_input_shape=input_shape,
                                    input_nodes=self._raw_input_nodes,
                                    output_nodes=self._raw_output_nodes)
        # Imported lazily -- presumably to avoid a circular import at module
        # load time; confirm before hoisting to the top of the file.
        from ..sub_graph_searcher import generate_scope_name
        scope_name_list = generate_scope_name(model_data)
        self._shape_dict = model_data.node_output_shape_dict
        for ind, (node_name, node) in enumerate(model_data.nodes_dict.items()):
            node_weight = {}
            node.scope_name = scope_name_list[ind]
            inputs = node.input_name_list
            # check each input from node or tensors
            for i in inputs:
                if i in model_data.tensors_dict:
                    # Inputs backed by an initializer tensor become weights.
                    tensor = model_data.tensors_dict[i]
                    t_name = tensor.name
                    t_value = tensor.to_array()
                    node_weight[t_name] = t_value
            self._nodes_collection[node_name] = OnnxGraphNode(node, node_weight)
            self._nodes_record[node_name] = node_name
            for nd_ipt_name in node.precursor_onnx_node_dict:
                self._build_connection(nd_ipt_name, node_name)
        super(OnnxGraph, self).build(input_shape=input_shape)
        self._collect_input_shape_of_each_node(input_shape)
    def _collect_input_shape_of_each_node(self, input_shape):
        """
        Collect input tensor shape of each node.
        Args:
            input_shape (tuple): Input shape.
        """
        input_node = InputNode(input_shape)
        input_node_name = self._raw_input_nodes.replace(":0", "")
        for node_name, node in self._nodes_collection.items():
            if node_name in self._input_nodes:
                # NOTE(review): .format() only substitutes when the raw input
                # name contains a '{}' placeholder; otherwise it is a no-op.
                ipt_nd_name = input_node_name.format(input_node.scope_name)
                input_node.set_scope_name(node.scope_name)
                node.precursor_nodes.insert(0, ipt_nd_name)
                input_node.set_successor_nodes(node_name)
                self._shape_dict[ipt_nd_name] = input_node.output_shape
            ipt_shape = []
            for p_nd in node.precursor_nodes:
                shp = self._shape_dict.get(p_nd)
                # Normalize list shapes to tuples for downstream consumers.
                ipt_shape.append(tuple(shp) if isinstance(shp, list) else shp)
            # Single-input nodes store a bare shape, not a one-element list.
            self._input_shape[node_name] = ipt_shape[0] if len(
                ipt_shape) == 1 else ipt_shape
    def sub_graph_merging(self):
        """Sub-graph merging is not supported for ONNX graphs."""
        raise NotImplementedError()
    @staticmethod
    def load_checkpoint(ckpt_path: str) -> Dict:
        """Checkpoint loading is not supported for ONNX graphs."""
        raise NotImplementedError()
    @staticmethod
    def load_metadata(**kwargs):
        """Metadata loading is not supported for ONNX graphs."""
        raise NotImplementedError()
    @staticmethod
    def load_graph(graph_path: str, **kwargs):
        """
        Load graph.
        Note:
            The input/output nodes are optional for
            tf saved model format. But required for .pb & .ckpt
        Args:
            graph_path (str): Graph path.
        Returns:
            object, ONNX model.
        """
        tf_input_nodes = kwargs.get('input_nodes')
        tf_output_nodes = kwargs.get('output_nodes')
        # Convert the TF artifact at graph_path into an ONNX model proto.
        onnx_model = TFGraphParser.parse(graph_path,
                                         input_nodes=tf_input_nodes,
                                         output_nodes=tf_output_nodes)
        return onnx_model
|
#!/usr/bin/env python3
import numpy as np
import pickle
from scipy.spatial.transform import Rotation
def _pose_vec_to_mat(vec):
    """Convert a 7-vector [x y z qw qx qy qz] into a 4x4 homogeneous matrix.

    The stored quaternion is assumed to be wxyz-ordered (as in the original
    in-place reshuffle) and is reordered to scipy's xyzw convention.
    """
    quat = np.asarray(vec[3:], dtype=float)[[1, 2, 3, 0]]  # wxyz -> xyzw
    mat = np.eye(4)
    # as_dcm() was removed in scipy >= 1.6; as_matrix() is the replacement.
    mat[:3, :3] = (Rotation.from_quat(quat) *
                   Rotation.from_euler('xyz', [0, np.pi, 0])).as_matrix()
    mat[:3, 3] = vec[:3]
    return mat


def load_poses(path):
    """Load ground-truth poses from ``<path>/ep_data.pkl``.

    Returns:
        (obj_pose, cam_poses): a (4, 4) object world pose and an (N, 4, 4)
        array of camera poses, as homogeneous transforms.
    """
    # `with` closes the file handle (the original leaked it).
    with open(path + '/ep_data.pkl', 'rb') as f:
        data = pickle.load(f)
    mats = [_pose_vec_to_mat(data['obj_world_pose'])]
    mats.extend(_pose_vec_to_mat(p) for p in data['cam_pose'])
    gt_mat = np.stack(mats)
    return gt_mat[0], gt_mat[1:]
def save_poses(obj_pose, cam_poses, path):
    """Write noise-perturbed object poses relative to each camera.

    For camera i, computes cam_pose^-1 @ obj_pose, injects Gaussian noise on
    rotation (sigma 0.1 rad) and translation (sigma 0.05) to simulate a
    detector, and stores '<i>.png' -> [x y z qw qx qy qz] in
    ``<path>/obj_det_poses.pkl``.
    """
    data = {}
    for ind, cam_pose in enumerate(cam_poses):
        diff = np.linalg.inv(cam_pose) @ obj_pose
        R = diff[:3, :3]
        t = diff[:3, 3]
        # as_dcm()/from_dcm() were removed in scipy >= 1.6; use the
        # as_matrix()/from_matrix() replacements.
        R = Rotation.from_euler('xyz', np.random.normal(0, 0.1, [3])).as_matrix() @ R
        t = t + np.random.normal(0, 0.05, [3])
        quat = Rotation.from_matrix(R).as_quat()[[3, 0, 1, 2]]  # xyzw -> wxyz
        data[str(ind) + '.png'] = np.hstack([t, quat])
    # `with` closes the file handle (the original leaked it).
    with open(path + '/obj_det_poses.pkl', 'wb') as f:
        pickle.dump(data, f)
if __name__ == '__main__':
    # Demo: reload ground-truth poses for episode 2 and write noisy
    # camera-relative object poses next to them.
    obj_pose, cam_poses = load_poses('../data/sim/ep_2')
    save_poses(obj_pose, cam_poses, '../data/sim/ep_2')
# Standard library
from collections.abc import Sequence  # `from collections import Sequence` was removed in Python 3.10
from functools import partial
import os
from pathlib import Path
import random

# Menpo
from menpo.base import menpo_src_dir_path, LazyList
from menpo.visualize import print_progress

# Local
from ..utils import _norm_path
def data_dir_path():
    r"""Return the path to Menpo's builtin ``./data`` folder.

    Returns
    -------
    ``pathlib.Path``
        The path to the local Menpo ./data folder.
    """
    data_dir = menpo_src_dir_path() / 'data'
    return data_dir
def data_path_to(asset_filename):
    r"""Return the path to a builtin asset in the ``./data`` folder.

    Parameters
    ----------
    asset_filename : `str`
        Filename (with extension) of a file builtin to Menpo; the allowed
        names are given by :func:`ls_builtin_assets()`.

    Returns
    -------
    data_path : `pathlib.Path`
        The path to the asset.

    Raises
    ------
    ValueError
        If ``asset_filename`` does not exist in the `data` folder.
    """
    asset_path = data_dir_path() / asset_filename
    if asset_path.is_file():
        return asset_path
    raise ValueError("{} is not a builtin asset: {}".format(
        asset_filename, ls_builtin_assets()))
def same_name(path):
    r"""Menpo's default image landmark resolver.

    Finds every landmark file sharing the asset's filename stem and returns
    a dict keyed by upper-cased extension (without the dot).
    """
    stem_pattern = path.with_suffix('.*')
    matches = landmark_file_paths(stem_pattern)
    return {p.suffix[1:].upper(): p for p in matches}
def same_name_video(path, frame_number):
    r"""Menpo's default video landmark resolver.

    Finds every landmark file named ``{stem}_{frame_number}.*`` and returns
    a dict keyed by upper-cased extension (without the dot).
    """
    frame_pattern = path.with_name('{}_{}.*'.format(path.stem, frame_number))
    matches = landmark_file_paths(frame_pattern)
    return {p.suffix[1:].upper(): p for p in matches}
def import_image(filepath, landmark_resolver=same_name, normalise=True):
    r"""Import a single image (and any associated landmark files).

    Landmark files sharing the image's filename stem are attached with a
    group name derived from the landmark file extension; if the image
    defines a mask, the mask is imported too.

    Parameters
    ----------
    filepath : `pathlib.Path` or `str`
        Relative or absolute path to an image file.
    landmark_resolver : `function`, optional
        Maps the image path to ``{'group_name': 'landmark_filepath'}``;
        the default matches landmarks with the same filename stem.
    normalise : `bool`, optional
        If ``True``, convert pixels to floating point in [0, 1]; otherwise
        keep the native datatype (commonly `uint8`).  Most Menpo operations
        assume floating point data.

    Returns
    -------
    images : :map:`Image` or list of
        An instantiated :map:`Image` (or subclass), or a list of images.
    """
    return _import(filepath, image_types,
                   landmark_ext_map=image_landmark_types,
                   landmark_resolver=landmark_resolver,
                   landmark_attach_func=_import_object_attach_landmarks,
                   importer_kwargs={'normalise': normalise})
def import_video(filepath, landmark_resolver=same_name_video, normalise=True,
                 importer_method='ffmpeg'):
    r"""Import a single video (and any associated per-frame landmarks).

    Returns a :map:`LazyList` wrapping the video's frames; each frame is
    only decoded when indexed or iterated.  Landmark files named
    ``{stem}_{frame_number}.*`` are attached per frame.

    Parameters
    ----------
    filepath : `pathlib.Path` or `str`
        Relative or absolute path to a video file.
    landmark_resolver : `function`, optional
        ``(video_path, frame_number) ->
        {'group_name': 'landmark_filepath'}``; the default matches
        ``{stem}_{frame_number}`` files.
    normalise : `bool`, optional
        If ``True``, convert frame pixels to floats in [0, 1]; otherwise
        keep the native datatype (commonly `uint8`).
    importer_method : {'ffmpeg'}, optional
        Importer backend; only 'ffmpeg' is currently implemented.

    Returns
    -------
    frames : :map:`LazyList`
        Lazy list of :map:`Image` (or subclass) wrapping the video frames.
    """
    importer_map = {'ffmpeg': ffmpeg_video_types}
    if importer_method not in importer_map:
        raise ValueError('Unsupported importer method requested. Valid values '
                         'are: {}'.format(importer_map.keys()))
    return _import(filepath, importer_map[importer_method],
                   landmark_ext_map=image_landmark_types,
                   landmark_resolver=landmark_resolver,
                   landmark_attach_func=_import_lazylist_attach_landmarks,
                   importer_kwargs={'normalise': normalise})
def import_landmark_file(filepath, asset=None):
    r"""Import a single landmark file as a :map:`LandmarkGroup`.

    Parameters
    ----------
    filepath : `pathlib.Path` or `str`
        Relative or absolute path to a landmark file.
    asset : optional
        Asset the landmarks belong to, passed through to the importer.

    Returns
    -------
    landmark_group : :map:`LandmarkGroup`
        The landmark group the file represents.
    """
    return _import(filepath, image_landmark_types, asset=asset)
def import_pickle(filepath):
    r"""Import arbitrary Python objects from a pickle file.

    Menpo uses the ``.pkl`` extension for pickle files and transparently
    handles gzip-compressed pickles with the ``.pkl.gz`` extension
    (smaller files at the cost of slower import/export).

    Parameters
    ----------
    filepath : `pathlib.Path` or `str`
        Relative or absolute path to a ``.pkl`` or ``.pkl.gz`` file.

    Returns
    -------
    object : `object`
        Whatever Python objects the pickle file contains.
    """
    return _import(filepath, pickle_types)
def import_images(pattern, max_images=None, shuffle=False,
                  landmark_resolver=same_name, normalise=True,
                  as_generator=False, verbose=False):
    r"""Import multiple images (and associated landmarks) lazily.

    Returns a :map:`LazyList` immediately; each image is only loaded when
    indexed.  Wrap the result in ``list`` to load everything eagerly.
    Landmark files sharing an image's filename stem are attached with a
    group name derived from the landmark file extension; masks are
    imported when defined.

    Parameters
    ----------
    pattern : `str`
        Glob pattern of images to import (see :map:`image_paths` for what
        will be found).
    max_images : positive `int`, optional
        Import at most this many images; ``None`` imports all.
    shuffle : `bool`, optional
        If ``True``, randomise the order; otherwise alphanumeric order.
    landmark_resolver : `function`, optional
        Maps an image to ``{'group_name': 'landmark_filepath'}``; the
        default matches landmarks with the same filename stem.
    normalise : `bool`, optional
        If ``True``, convert pixels to floats in [0, 1]; otherwise keep
        the native datatype (commonly `uint8`).
    as_generator : `bool`, optional
        If ``True``, return a generator yielding assets one by one.
    verbose : `bool`, optional
        If ``True``, report progress with a progress bar.

    Returns
    -------
    lazy_list : :map:`LazyList` or generator of :map:`Image`
        Lazily-evaluated images matching the glob.

    Raises
    ------
    ValueError
        If no images are found at the provided glob.

    Examples
    --------
    Import images at 20% scale from a huge collection:

    >>> images = []
    >>> for img in menpo.io.import_images('./massive_image_db/*'):
    >>>    # rescale to a sensible size as we go
    >>>    images.append(img.rescale(0.2))
    """
    return _import_glob_lazy_list(
        pattern, image_types,
        max_assets=max_images, shuffle=shuffle,
        landmark_resolver=landmark_resolver,
        landmark_ext_map=image_landmark_types,
        landmark_attach_func=_import_object_attach_landmarks,
        as_generator=as_generator, verbose=verbose,
        importer_kwargs={'normalise': normalise})
def import_videos(pattern, max_videos=None, shuffle=False,
                  landmark_resolver=same_name_video, normalise=True,
                  importer_method='ffmpeg', as_generator=False, verbose=False):
    r"""Import multiple videos (and associated landmarks) lazily.

    Yields one :map:`LazyList` per video; frames are only decoded when
    indexed.  Landmark files named ``{stem}_{frame_number}.*`` are attached
    per frame.

    Parameters
    ----------
    pattern : `str`
        Glob pattern of videos to import (see :map:`video_paths` for what
        will be found).
    max_videos : positive `int`, optional
        Import at most this many videos; ``None`` imports all.
    shuffle : `bool`, optional
        If ``True``, randomise the order; otherwise alphanumeric order.
    landmark_resolver : `function`, optional
        ``(video_path, frame_number) ->
        {'group_name': 'landmark_filepath'}``; the default matches
        ``{stem}_{frame_number}`` files.
    normalise : `bool`, optional
        If ``True``, convert frame pixels to floats in [0, 1]; otherwise
        keep the native datatype (commonly `uint8`).
    importer_method : {'ffmpeg'}, optional
        Importer backend; only 'ffmpeg' is currently implemented.
    as_generator : `bool`, optional
        If ``True``, return a generator yielding assets one by one.
    verbose : `bool`, optional
        If ``True``, report progress with a progress bar.

    Returns
    -------
    lazy_list : :map:`LazyList` or generator of :map:`LazyList`
        One lazy frame list per imported video.

    Raises
    ------
    ValueError
        If no videos are found at the provided glob, or the importer
        method is unknown.

    Examples
    --------
    Import videos and rescale every frame of each video:

    >>> videos = []
    >>> for video in menpo.io.import_videos('./set_of_videos/*'):
    >>>    frames = []
    >>>    for frame in video:
    >>>        # rescale to a sensible size as we go
    >>>        frames.append(frame.rescale(0.2))
    >>>    videos.append(frames)
    """
    importer_map = {'ffmpeg': ffmpeg_video_types}
    if importer_method not in importer_map:
        raise ValueError('Unsupported importer method requested. Valid values '
                         'are: {}'.format(importer_map.keys()))
    return _import_glob_lazy_list(
        pattern, importer_map[importer_method],
        max_assets=max_videos, shuffle=shuffle,
        landmark_resolver=landmark_resolver,
        landmark_ext_map=image_landmark_types,
        landmark_attach_func=_import_lazylist_attach_landmarks,
        as_generator=as_generator, verbose=verbose,
        importer_kwargs={'normalise': normalise})
def import_landmark_files(pattern, max_landmarks=None, shuffle=False,
                          as_generator=False, verbose=False):
    r"""Import multiple landmark files lazily.

    Returns a :map:`LazyList` immediately; each file is only parsed when
    indexed.  Wrap the result in ``list`` to load everything eagerly.

    Parameters
    ----------
    pattern : `str`
        Glob pattern of landmark files (see :map:`landmark_file_paths`
        for what will be found).
    max_landmarks : positive `int`, optional
        Import at most this many files; ``None`` imports all.
    shuffle : `bool`, optional
        If ``True``, randomise the order; otherwise alphanumeric order.
    as_generator : `bool`, optional
        If ``True``, return a generator yielding assets one by one.
    verbose : `bool`, optional
        If ``True``, report progress dynamically.

    Returns
    -------
    lazy_list : :map:`LazyList` or generator of :map:`LandmarkGroup`
        Lazily-evaluated landmark groups matching the glob.

    Raises
    ------
    ValueError
        If no landmarks are found at the provided glob.
    """
    return _import_glob_lazy_list(
        pattern, image_landmark_types,
        max_assets=max_landmarks, shuffle=shuffle,
        as_generator=as_generator, verbose=verbose)
def import_pickles(pattern, max_pickles=None, shuffle=False, as_generator=False,
                   verbose=False):
    r"""Import multiple pickle files lazily.

    Menpo uses the ``.pkl`` extension for pickles; gzip-compressed
    ``.pkl.gz`` files are automatically un-gzipped on import.  A
    :map:`LazyList` is returned immediately; each pickle is only loaded
    when indexed.

    Parameters
    ----------
    pattern : `str`
        Glob pattern of pickle files to import.
    max_pickles : positive `int`, optional
        Import at most this many pickles; ``None`` imports all.
    shuffle : `bool`, optional
        If ``True``, randomise the order; otherwise alphanumeric order.
    as_generator : `bool`, optional
        If ``True``, return a generator yielding assets one by one.
    verbose : `bool`, optional
        If ``True``, report progress dynamically.

    Returns
    -------
    lazy_list : :map:`LazyList` or generator of Python objects
        Lazily-evaluated contents of the matched pickle files.

    Raises
    ------
    ValueError
        If no pickles are found at the provided glob.
    """
    return _import_glob_lazy_list(
        pattern, pickle_types,
        max_assets=max_pickles, shuffle=shuffle,
        as_generator=as_generator, verbose=verbose)
def _import_builtin_asset(asset_name, **kwargs):
    r"""Import a single builtin asset (image or landmark file).

    Imports the relevant builtin asset from the ``./data`` directory that
    ships with Menpo.

    Parameters
    ----------
    asset_name : `str`
        The filename of a builtin asset (see :map:`ls_builtin_assets`
        for allowed values)

    Returns
    -------
    asset :
        An instantiated :map:`Image` or :map:`LandmarkGroup` asset.
    """
    builtin_path = data_path_to(asset_name)
    # The filename alone doesn't tell us whether this is an image or a
    # landmark file -- try the image importers first and fall back to the
    # landmark importers if that fails.
    try:
        return _import(builtin_path, image_types,
                       landmark_ext_map=image_landmark_types,
                       landmark_attach_func=_import_object_attach_landmarks,
                       importer_kwargs=kwargs)
    except ValueError:
        return _import(builtin_path, image_landmark_types,
                       importer_kwargs=kwargs)
def ls_builtin_assets():
    r"""List all the builtin asset examples provided in Menpo.

    Returns
    -------
    list of strings
        Filenames of all assets in the data directory shipped with Menpo
    """
    # Everything directly inside the data dir that is not itself a
    # directory is considered an asset.
    return [entry.name for entry in data_dir_path().glob('*')
            if not entry.is_dir()]
def import_builtin(x):
    """Return a no-argument-style importer bound to builtin asset ``x``.

    The returned callable forwards any keyword arguments straight through
    to :func:`_import_builtin_asset`.
    """
    def _bound_import(**kwargs):
        return _import_builtin_asset(x, **kwargs)
    return _bound_import
class BuiltinAssets(object):
    """Callable namespace for importing Menpo's builtin assets.

    Calling an instance imports the named asset; per-asset attributes are
    attached to the singleton after class definition.
    """

    def __call__(self, asset_name, **kwargs):
        return _import_builtin_asset(asset_name, **kwargs)
# Singleton through which builtin assets are imported, either by calling it
# (import_builtin_asset('x.jpg')) or via per-asset attributes added below.
import_builtin_asset = BuiltinAssets()

# Expose each builtin asset as an attribute; dots in filenames become
# underscores so the names are valid Python identifiers
# (e.g. import_builtin_asset.breakingbad_jpg()).
for asset in ls_builtin_assets():
    setattr(import_builtin_asset, asset.replace('.', '_'),
            import_builtin(asset))
def image_paths(pattern):
    r"""
    Return the filepaths of all images Menpo can import that match the
    glob ``pattern``.
    """
    return glob_with_suffix(pattern, image_types)
def video_paths(pattern):
    r"""
    Return the filepaths of all videos Menpo can import (via ffmpeg) that
    match the glob ``pattern``.
    """
    return glob_with_suffix(pattern, ffmpeg_video_types)
def landmark_file_paths(pattern):
    r"""
    Return the filepaths of all landmark files Menpo can import that match
    the glob ``pattern``.
    """
    return glob_with_suffix(pattern, image_landmark_types)
def _import_glob_lazy_list(pattern, extension_map, max_assets=None,
                           landmark_resolver=same_name, shuffle=False,
                           as_generator=False, landmark_ext_map=None,
                           landmark_attach_func=None, importer_kwargs=None,
                           verbose=False):
    """Resolve ``pattern`` to importable files and defer the actual
    importing behind a :map:`LazyList` (or generator).

    Raises ``ValueError`` if the glob matches nothing.
    """
    # Only ask the glob for sorted results when the caller doesn't want a
    # random order anyway.
    filepaths = list(glob_with_suffix(pattern, extension_map,
                                      sort=(not shuffle)))
    if shuffle:
        random.shuffle(filepaths)
    if max_assets:
        filepaths = filepaths[:max_assets]
    n_files = len(filepaths)
    if n_files == 0:
        raise ValueError('The glob {} yields no assets'.format(pattern))

    # One zero-argument callable per file; nothing is imported until the
    # LazyList entry is indexed.
    importers = [partial(_import, fp, extension_map,
                         landmark_resolver=landmark_resolver,
                         landmark_ext_map=landmark_ext_map,
                         landmark_attach_func=landmark_attach_func,
                         importer_kwargs=importer_kwargs)
                 for fp in filepaths]
    lazy_list = LazyList(importers)

    if verbose and as_generator:
        # wrap the generator with the progress reporter
        lazy_list = print_progress(lazy_list, prefix='Importing assets',
                                   n_items=n_files)
    elif verbose:
        print('Found {} assets, index the returned LazyList to import.'.format(
            n_files))
    if as_generator:
        return (a for a in lazy_list)
    return lazy_list
def _import_object_attach_landmarks(built_objects, landmark_resolver,
                                    landmark_ext_map=None):
    """Eagerly import and attach landmarks to each built object, in place.

    A no-op when ``landmark_ext_map`` is ``None``. Landmarks whose
    dimensionality does not match the asset are silently skipped.
    """
    if landmark_ext_map is None:
        return
    for obj in built_objects:
        # The user-supplied resolver maps an asset path to
        # {'group_name': landmark_filepath} (or None for "no landmarks").
        lm_paths = landmark_resolver(obj.path)
        if lm_paths is None:
            continue
        for group_name, lm_path in lm_paths.items():
            lms = _import(lm_path, landmark_ext_map, asset=obj)
            if obj.n_dims == lms.n_dims:
                obj.landmarks[group_name] = lms
def _import_lazylist_attach_landmarks(built_objects, landmark_resolver,
                                      landmark_ext_map=None):
    """Rewrap each LazyList in ``built_objects`` (in place) so landmarks are
    resolved and attached lazily, at the moment an item is indexed.

    A no-op when ``landmark_ext_map`` is ``None``.
    """
    if landmark_ext_map is None:
        return
    for k in range(len(built_objects)):
        x = built_objects[k]
        # Use the users function to find landmarks
        lm_paths = partial(landmark_resolver, x.path)

        # Compose landmark resolution onto the lazy list indexing - after
        # the item has been indexed.
        # BUG FIX: ``lm_paths`` is bound as a default argument rather than
        # closed over. A plain closure late-binds to the enclosing-scope
        # variable, so every wrapped callable from every loop iteration
        # would have used the resolver of the LAST asset once the lazy
        # list was finally indexed.
        def wrap_landmarks(f, index, lm_paths=lm_paths):
            obj = f()
            for group_name, lm_path in lm_paths(index).items():
                lms = _import(lm_path, landmark_ext_map, asset=obj)
                if obj.n_dims == lms.n_dims:
                    obj.landmarks[group_name] = lms
            return obj

        new_ll = LazyList([partial(wrap_landmarks, c, i)
                           for i, c in enumerate(x._callables)])
        built_objects[k] = new_ll
def _import(filepath, extensions_map, landmark_resolver=same_name,
            landmark_ext_map=None, landmark_attach_func=None,
            asset=None, importer_kwargs=None):
    r"""
    Creates an importer for the filepath passed in, and then calls build on
    it, returning a list of assets or a single asset, depending on the
    file type.

    The type of assets returned are specified by the `extensions_map`.

    Parameters
    ----------
    filepath : string
        The filepath to import
    extensions_map : dictionary (String, :class:`menpo.io.base.Importer`)
        A map from extensions to importers. The importers are expected to be
        non-instantiated classes. The extensions are expected to
        contain the leading period eg. `.obj`.
    landmark_ext_map : dictionary (str, :map:`Importer`), optional
        If not None an attempt will be made to import annotations with
        extensions defined in this mapping. If None, no attempt will be
        made to import annotations.
    landmark_resolver: function, optional
        If not None, this function will be used to find landmarks for each
        asset. The function should take one argument (the asset itself) and
        return a dictionary of the form {'group_name': 'landmark_filepath'}
    landmark_attach_func : function, optional
        Strategy used to attach the resolved landmarks to the built
        objects (eager for plain objects, lazy for LazyLists). If None,
        no landmarks are attached.
    asset: object, optional
        If not None, the asset will be passed to the importer's build method
        as the asset kwarg
    importer_kwargs: dict, optional:
        kwargs that will be supplied to the importer if not None

    Returns
    -------
    assets : asset or list of assets
        The loaded asset or list of assets.

    Raises
    ------
    ValueError
        If the path is not a file, or no suitable importer exists for it.
    """
    path = _norm_path(filepath)
    if not path.is_file():
        raise ValueError("{} is not a file".format(path))
    # below could raise ValueError as well...
    importer = importer_for_filepath(path, extensions_map,
                                     importer_kwargs=importer_kwargs)
    if asset is not None:
        built_objects = importer.build(asset=asset)
    else:
        built_objects = importer.build()
    # landmarks are iterable so check for list precisely
    # enforce a list to make processing consistent
    if not isinstance(built_objects, list):
        built_objects = [built_objects]
    # attach path if there is no x.path already.
    for x in built_objects:
        if not hasattr(x, 'path'):
            try:
                x.path = path
            except AttributeError:
                pass  # that's fine! Probably a dict/list from PickleImporter.
    if landmark_attach_func is not None:
        landmark_attach_func(built_objects, landmark_resolver,
                             landmark_ext_map=landmark_ext_map)
    # undo list-ification (if we added it!)
    if len(built_objects) == 1:
        built_objects = built_objects[0]
    return built_objects
def _pathlib_glob_for_pattern(pattern, sort=True):
    r"""Generator for glob matching a string path pattern

    Splits the provided ``pattern`` into a root path for pathlib and a
    subsequent glob pattern to be applied.

    Parameters
    ----------
    pattern : `str`
        Path including glob patterns. If no glob patterns are present and the
        pattern is a dir, a '*' pattern will be automatically added
        (matching the dir's direct children only, not recursing).
    sort : `bool`, optional
        If True, the returned paths will be sorted. If False, no guarantees are
        made about the ordering of the results.

    Yields
    ------
    Path : A path to a file matching the provided pattern.

    Raises
    ------
    ValueError
        If the pattern doesn't contain a '*' wildcard and is not a directory
    """
    pattern = _norm_path(pattern)
    pattern_str = str(pattern)
    # Split on the FIRST '*' only: everything before it is a concrete
    # filesystem prefix, everything from it onwards is the glob proper.
    gsplit = pattern_str.split('*', 1)
    if len(gsplit) == 1:
        # no glob provided. Is the provided pattern a dir?
        if Path(pattern).is_dir():
            preglob = pattern_str
            pattern = '*'
        else:
            raise ValueError('{} is an invalid glob and '
                             'not a dir'.format(pattern))
    else:
        preglob = gsplit[0]
        pattern = '*' + gsplit[1]
    if not os.path.isdir(preglob):
        # the glob pattern is in the middle of a path segment. pair back
        # to the nearest dir and add the reminder to the pattern
        preglob, pattern_prefix = os.path.split(preglob)
        pattern = pattern_prefix + pattern
    p = Path(preglob)
    paths = p.glob(str(pattern))
    if sort:
        paths = sorted(paths)
    return paths
def glob_with_suffix(pattern, extensions_map, sort=True):
    r"""
    Filter the glob results for ``pattern`` down to files whose extension
    has an importer registered in ``extensions_map``.

    Parameters
    ----------
    pattern : string
        A UNIX style glob pattern to match against.
    extensions_map : dictionary (String, :class:`menpo.io.base.Importer`)
        A map from extensions (with the leading period, e.g. `.obj`) to
        non-instantiated importer classes.
    sort : `bool`, optional
        If True, the returned paths will be sorted; otherwise no ordering
        guarantee is made.

    Yields
    ------
    filepaths : list of string
        The list of filepaths that have valid extensions.
    """
    for path in _pathlib_glob_for_pattern(pattern, sort=sort):
        # Prefer the compound extension built from the last two suffixes
        # (catches '.pkl.gz'); fall back to the final suffix alone for
        # filenames that merely contain dots, e.g. '1.1.png'.
        compound = ''.join(path.suffixes[-2:])
        if compound in extensions_map or path.suffix in extensions_map:
            yield path
def importer_for_filepath(filepath, extensions_map, importer_kwargs=None):
    r"""
    Given a filepath, return the appropriate importer as mapped by the
    extension map.

    Parameters
    ----------
    filepath : `pathlib.Path`
        The filepath to get importers for
    extensions_map : dictionary (String, :class:`menpo.io.base.Importer`)
        A map from (lower-case) extensions to importers. The importers are
        expected to be a subclass of :class:`Importer`. The extensions are
        expected to contain the leading period eg. `.obj`.
    importer_kwargs: dictionary, optional
        kwargs that will be supplied to the importer if not None.

    Returns
    --------
    importer: :class:`menpo.io.base.Importer` instance
        Importer as found in the `extensions_map` instantiated for the
        filepath provided.

    Raises
    ------
    ValueError
        If no importer is registered for the file's extension.
    """
    # Normalise case unconditionally so '.JPG' and '.Pkl.Gz' both match the
    # lower-case map keys. (Previously only all-upper suffixes were lowered
    # via str.isupper(), so mixed-case extensions could never match.)
    suffix = ''.join(filepath.suffixes).lower()
    importer_type = extensions_map.get(suffix)
    # We couldn't find an importer for the full compound suffix
    # (e.g. '.foo.bar' from a stem that merely contains dots) - retry with
    # just the final suffix ('.bar'). Trying the compound form first still
    # catches genuine multi-part extensions like '.pkl.gz'.
    if importer_type is None and len(filepath.suffixes) > 1:
        suffix = filepath.suffix.lower()
        importer_type = extensions_map.get(suffix)
    if importer_type is None:
        raise ValueError("{} does not have a "
                         "suitable importer.".format(suffix))
    if importer_kwargs is not None:
        return importer_type(str(filepath), **importer_kwargs)
    return importer_type(str(filepath))
class Importer(object):
    r"""
    Abstract representation of an Importer. Instantiation merely records
    the file's location; the expensive parsing work is deferred until
    :meth:`build` is invoked, so many importers can be created cheaply up
    front and imported later on demand.

    Parameters
    ----------
    filepath : string
        An absolute filepath
    """

    def __init__(self, filepath):
        normalised = os.path.abspath(os.path.expanduser(filepath))
        self.filepath = normalised
        # Pre-compute the commonly-needed pieces of the path.
        self.filename = os.path.splitext(os.path.basename(normalised))[0]
        self.extension = os.path.splitext(normalised)[1]
        self.folder = os.path.dirname(normalised)

    def build(self):
        r"""
        Read the file from disk and parse it into an appropriate object.

        Returns
        -------
        object : object or list
            An instantiated class of the expected type, e.g. a
            :class:`menpo.shape.Trimesh` for an `.obj` importer. If
            multiple objects need to be returned from one importer, a
            plain list (not a subclass) must be returned.
        """
        raise NotImplementedError()
# Avoid circular imports
from menpo.io.input.extensions import (image_landmark_types, image_types,
pickle_types, ffmpeg_video_types)
|
# encoding:utf-8
"""Send a local image to the Baidu image-classification API and save the
JSON result, then print the space-joined keywords."""
import requests
import base64
import json

filename = 'res.json'
request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v2/advanced_general"
# Read and base64-encode the image; the context manager closes the handle
# (the previous bare open() leaked it).
with open('./res.jpg', 'rb') as f:
    img = base64.b64encode(f.read())
params = {"image": img}
access_token = 'your own access_token'  # NOTE: placeholder, supply a real token
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/x-www-form-urlencoded'}
response = requests.post(request_url, data=params, headers=headers)
# requests.Response is truthy for 2xx/3xx status codes.
if response:
    # Parse the body once instead of calling response.json() repeatedly.
    result = response.json()
    with open(filename, 'w', encoding='utf-8') as file_obj:
        json.dump(result, file_obj, ensure_ascii=False, indent=4)
    reslines = result['result']
    res = ' '.join(entry['keyword'] for entry in reslines)
    print(res)
|
#!/usr/bin/env python
# coding=UTF-8
# BSD 2-Clause License
# Copyright (c) 2021, Yury Demidenko (Beigesoft™)
# All rights reserved.
# See the LICENSE in the root source folder
#Classify NIST data - train (900 samples) and test (the rest 100) files
#NIST data from http://www.cis.jhu.edu/~sachin/digit/digit.html 1000 28x28 digits (unsigned char 8bit): data0..data9
#required path to the data in command line sys.argv[1]
import sys, os
sys.path += [os.path.dirname(os.path.abspath (__file__)) + '/../..']
from BsLibSvm import *
from BsLibMisc import *
import numpy as np
def prnUsage ():
    """Print CLI usage help for this script to stdout."""
    usage_lines = (
        'You must pass path to the NIST files 1000 28x28 digits (unsigned char 8bit): data0..data9!',
        'from http://www.cis.jhu.edu/~sachin/digit/digit.html',
        'Use: python dig1000bssvm.py [path_to_nist_files]',
    )
    print ('\n'.join (usage_lines))
# Minimal CLI parsing: take the first positional argument as the data path.
ip = 0
pth = ''
for arg in sys.argv:
    if ip == 1:
        pth = arg
    ip += 1
if pth == '' or pth == '-h':
    prnUsage ()
    exit (1)
digCnt = 1000  # samples per digit file
digSz = 28  # image side length in pixels
pixCnt = digSz * digSz  # pixels per image (784)
pixCntAll = digCnt * pixCnt  # total pixels per digit file
trainCnt = 900  # first 900 samples are the training set
testCnt = digCnt - trainCnt  # remaining 100 samples are the test set
testOfst = trainCnt * pixCnt  # element offset where the test block starts
testCntTot = testCnt * 10  # total test samples over all 10 digits
wrongCnt = 0
PRED = np.zeros ((testCntTot), dtype=np.uint8)  # predicted labels buffer
NUMS = [0,1,2,3,4,5,6,7,8,9]  # replaced below with per-digit pixel arrays
try:
    for d in range (10):
        fnme = pth + "/data" + str (d)
        NUMS[d] = np.fromfile (fnme, dtype=np.uint8)
        if NUMS[d].shape[0] != pixCntAll:
            print ('It must be 1000 uint8 28x28 samples in ', fnme)
            # bare raise with no active exception -> RuntimeError,
            # caught by the except below and re-raised after the usage hint
            raise
        #just for visual control, print several samples:
        print ("Samples #0,1,2,900,901,902 From file: ", fnme)
        bsPrnImgTxt (NUMS[d], 0, digSz)
        bsPrnImgTxt (NUMS[d], 1*pixCnt, digSz)
        bsPrnImgTxt (NUMS[d], 2*pixCnt, digSz)
        bsPrnImgTxt (NUMS[d], 900*pixCnt, digSz)
        bsPrnImgTxt (NUMS[d], 901*pixCnt, digSz)
        bsPrnImgTxt (NUMS[d], 902*pixCnt, digSz)
        #there is 5 #900 that looks like 6
except:
    prnUsage ()
    print (sys.exc_info ()[0])
    raise
#scaling to 0-1: binarize every pixel in place. Vectorized with a boolean
#mask -- the previous per-pixel Python loop iterated 784,000 elements per
#digit file and was needlessly slow; behavior (uint8 0/1 values) is
#identical.
for d in range (10):
    NUMS[d][NUMS[d] > 0] = 1
#kern = BsSvmPolyKern (1.0, 9.0)
# RBF kernel; 1/pixCnt is presumably the gamma parameter -- confirm
# against BsLibSvm's BsSvmRbfKern signature.
kern = BsSvmRbfKern (1.0/pixCnt)
X = np.zeros ((2, pixCnt))  # X[0] = current test sample, X[1] = current train sample
Y = np.zeros ((2), dtype=np.int16)
Y[1] = 1  # two-class labels for the single test/train pair
minStp = 0.001
YNEGPOS = False
# Pairwise scheme: every test sample of digit d is trained against every
# train sample of the same digit (see the result log in the comments
# below -- the author concluded this doesn't work and is slow).
for d in range (10):
    it = 0
    for i in range (testCnt):
        for ipx in range (pixCnt):
            X[0][ipx] = NUMS[d][testOfst + i*pixCnt + ipx]
        for j in range (trainCnt):
            for ipx in range (pixCnt):
                X[1][ipx] = NUMS[d][j*pixCnt + ipx]
            if it == 0:
                YNEGPOS = bsSvmCheckData (X, Y, kern)
            Wa, ba, cntWrnga, marga = bsSvmTrain (X, Y, kern, YNEGPOS, minStp)
            if it < 2:
                print ('Digit test:')
                bsPrnImgTxt (X[0], 0, digSz)
                print ('Digit train:')
                bsPrnImgTxt (X[1], 0, digSz)
            it += 1
#non-scaling poly 9
#0-0 MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [4.57884289e+39 -2.82485959e+38 -1.15210584e+41] 7840 0 0 0 1
#0-1 MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [1.40568112e+37 4.13577278e+38 5.86868106e+37] 7840 0 0 0 1
#scaling 0-1 poly 9
#0-0 MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [1.91764603e+17 1.46320679e+19 9.71335721e+18] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [1.08844573e+20 -1.04308405e+19 -2.74254261e+21] 7840 0 0 0 1
#0-1 MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [6.32187058e+17 -7.60231059e+17 -1.70014164e+19] 7840 0 0 1 0
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [6.60399031e+17 -3.54520878e+16 -1.70014164e+19] 7840 0 0 1 0
#0-2 MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [1.19722044e+19 -5.61752833e+18 -3.07720364e+20] 7840 0 0 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [6.16342828e+19 -1.70014164e+19 -1.60041537e+21] 7840 0 0 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [3.27228745e+17 -8.59475475e+18 -1.70014164e+19] 7840 0 0 1 0
#scaling 0-1 RBF:
#0-2 MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [7.38265432e-110 -3.14727032e-144 -1.85156115e-108] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [1.78136532e-106 -7.92801133e-153 -4.57641200e-105] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [3.01857140e-101 -2.29788796e-145 -7.63048368e-100] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [2.73135996e-142 -7.92801133e-153 -7.01699327e-141] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [8.93280633e-130 -9.65098252e-171 -2.34645687e-128] 7840 0 1 0 1
#0-1 MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [3.08597313e-154 -6.18540508e-177 -7.92801133e-153] 7840 0 1 1 0
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [3.08597313e-154 -7.26433219e-195 -7.92801133e-153] 7840 0 1 1 0
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [3.08597313e-154 -2.66176079e-190 -7.92801133e-153] 7840 0 1 1 0
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [3.08597313e-154 -1.46541764e-177 -7.92801133e-153] 7840 0 1 1 0
#0-0 MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [7.94961201e-118 -1.45103947e-154 -2.03454240e-116] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [8.24965375e-113 -1.14570372e-143 -2.07064762e-111] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [7.93757358e-118 -1.43589397e-154 -2.03456050e-116] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [1.96196182e-089 -4.16017247e-143 -4.92839412e-088] 7840 0 1 0 1
# MARGB12m, cntItr, cntWrngMin, cntWrngMax, i1m, i2m: [4.51573110e-097 -5.47112966e-142 -1.13613548e-095] 7840 0 1 0 1
# conclusion - doesn't work and slow - transforming into pixels count dimension plus transformation
# ftest = open (pth+'/nist'+str(digCnt)+'x'+str(digSz)+'x'+str(digSz)+'t.pred', 'w')
# accur = (testCntTot - wrongCnt) / testCntTot * 100.0
# ftest.write ('Accuracy = ' + str (accur) + '%\n')
# for d in range (10):
# for j in range (testCnt):
# ftest.write (' ' + str (PRED[d * testCnt + j]))
# ftest.write ('\n')
# ftest.close ()
|
import pandas as pd
from pickle import dump
from typing import List, Tuple
from sklearn.preprocessing import MinMaxScaler
def extract_features_from_dataset(data: pd.DataFrame) -> pd.DataFrame:
    """
    Extract calendar and price features from market chart data.

    Parameters
    ----------
    data: pd.DataFrame
        Market chart data. Must contain a ``timestamp`` column of
        datetime-like values plus ``open``/``high``/``low``/``close``.

    Returns
    -------
    pd.DataFrame
        One row per input row (fresh RangeIndex) with calendar features
        and price features, including ``close_change`` = close - open.
    """
    # Vectorised with the .dt accessor instead of iterating with iterrows()
    # (which is Python-level per-row and very slow on large charts).
    # isocalendar().week replaces Timestamp.week, which was removed in
    # pandas 2.0.
    ts = pd.to_datetime(data["timestamp"])
    features = pd.DataFrame(
        {
            "day_of_week": ts.dt.dayofweek,
            "day_of_month": ts.dt.day,
            "week_of_year": ts.dt.isocalendar().week.astype(int),
            "month_of_year": ts.dt.month,
            "open": data["open"],
            "high": data["high"],
            "low": data["low"],
            "close": data["close"],
            "close_change": data["close"] - data["open"],
        }
    )
    # Match the original row-list construction, which produced a RangeIndex.
    return features.reset_index(drop=True)
def split_data(data: pd.DataFrame) -> pd.DataFrame:
    """
    Split data into training (first 90%) and test (remaining rows) sets.

    Parameters
    ----------
    data: pd.DataFrame
        Market chart data.

    Returns
    -------
    pd.DataFrame
        Tuple of (train_df, test_df); together they cover every input row.
    """
    train_size = int(len(data) * 0.9)
    # BUG FIX: the test slice starts at train_size, not train_size + 1 --
    # the previous off-by-one silently dropped the row at index
    # `train_size` from BOTH sets.
    train_df, test_df = data[:train_size], data[train_size:]
    return train_df, test_df
def scale_data(
    train_df: pd.DataFrame, test_df: pd.DataFrame, dir_path: str,
) -> pd.DataFrame:
    """
    Scale features into the range [-1, 1] with a MinMaxScaler fitted on
    the training data only (avoiding test-set leakage), and persist the
    fitted scaler for reuse at inference time.

    (Note: the previous docstring claimed mean-0/std-1 scaling; that
    describes StandardScaler, not the MinMaxScaler actually used here.)

    Parameters
    ----------
    train_df: pd.DataFrame
        Training data.
    test_df: pd.DataFrame
        Test data.
    dir_path: str
        Directory path to save the scaler (written as ``scaler.pkl``).

    Returns
    -------
    pd.DataFrame
        Scaled training and test data (index/columns preserved).
    """
    scaler = MinMaxScaler(feature_range=(-1, 1)).fit(train_df)
    scaled_train_df = pd.DataFrame(
        scaler.transform(train_df),
        index=train_df.index,
        columns=train_df.columns,
    )
    scaled_test_df = pd.DataFrame(
        scaler.transform(test_df),
        index=test_df.index,
        columns=test_df.columns,
    )
    # Context manager so the handle is closed even if dump() raises --
    # the previous `dump(scaler, open(...))` leaked the file handle.
    with open(f"{dir_path}/scaler.pkl", "wb") as scaler_file:
        dump(scaler, scaler_file)
    return scaled_train_df, scaled_test_df
def create_sequences(
    input_data: pd.DataFrame,
    target_column: str,
    sequence_length: int,
) -> List[Tuple[pd.DataFrame, float]]:
    """
    Build overlapping (window, label) training pairs from the input data.

    Each pair is a ``sequence_length``-row window and the value of
    ``target_column`` in the row immediately after that window.

    Parameters
    ----------
    input_data: pd.DataFrame
        Pandas dataframe of input data.
    target_column: str
        Name of the column to predict.
    sequence_length: int
        Length of the sequence.

    Returns
    -------
    List[Tuple[pd.DataFrame, float]]
        List of sequences.
    """
    sequences = []
    for start in range(len(input_data) - sequence_length):
        end = start + sequence_length
        window = input_data[start:end]
        # The label is the target value of the first row AFTER the window.
        label = input_data.iloc[end][target_column]
        sequences.append([window, label])
    return sequences
def split_train_and_val_sequences(
    sequences: List[Tuple[pd.DataFrame, float]],
    val_size: float,
) -> Tuple[List[Tuple[pd.DataFrame, float]]]:
    """
    Partition sequences into a leading training slice and a trailing
    validation slice.

    Parameters
    ----------
    sequences: List[Tuple[pd.DataFrame, float]]
        List of sequences.
    val_size: float
        Percentage of the data to use as validation.

    Returns
    -------
    Tuple[List[Tuple[pd.DataFrame, float]]]
        Tuple of training and validation sequences.
    """
    # Everything before the cutoff goes to training; the rest (the most
    # recent data) becomes validation.
    cutoff = len(sequences) * (1 - val_size)
    train_sequences, val_sequences = [], []
    for sequence, label in sequences:
        bucket = train_sequences if len(train_sequences) < cutoff else val_sequences
        bucket.append((sequence, label))
    return train_sequences, val_sequences
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sudo i2cdetect -y 1
from __future__ import print_function
import time
from led_matrix import LEDDisplay
from pygecko.multiprocessing import geckopy
def got_msg(t, m):
    """Subscriber callback for the 'led' topic; currently a no-op
    placeholder (topic ``t``, message ``m`` are ignored)."""
def matrix(**kwargs):
    """Gecko node driving the LED matrix; loops until node shutdown."""
    geckopy.init_node(**kwargs)
    rate = geckopy.Rate(1)  # presumably 1 Hz loop rate -- confirm geckopy semantics
    test = kwargs.get('test', False)  # NOTE(review): read but never used below
    matrix = LEDDisplay()  # NOTE(review): shadows this function's own name
    s = geckopy.Subscriber(['led'], got_msg)
    i = 0
    while not geckopy.is_shutdown():
        # if s has message, call function
        # else update led
        geckopy.log('x')
        # matrix.setRandom()
        matrix.update()
        # matrix.set(1,1,127)
        # matrix.clear()
        # matrix.display.set_pixel(i//8,i%8, 1)
        # matrix.write()
        # i = (i+1)%64
        rate.sleep()
if __name__ == "__main__":
    kw = {'test': True}
    # BUG FIX: unpack the dict so matrix() receives test=True as a real
    # keyword argument. The previous call `matrix(kwargs=kw)` passed the
    # whole dict under the single key 'kwargs', so kwargs.get('test')
    # inside matrix() could never find it.
    matrix(**kw)
|
"""
InfluxDB-related functionality
------------------------------
Includes the `hadmin-stats-influxd` entry point.
"""
import argparse
import hadmin.system
import requests
from requests.auth import HTTPBasicAuth
import sys
import time
SEC_TO_NANOSEC = 10**9
class WriteBody:
    """Accumulates measurements as InfluxDB line-protocol entries."""

    def __init__(self):
        # One line-protocol string per measurement; joined by __str__.
        self.body = []

    def sanitize_name(self, name):
        """Replace '-' and '.' with '_' and collapse runs of underscores."""
        sanitized = name.replace('-', '_').replace('.', '_')
        while '__' in sanitized:
            sanitized = sanitized.replace('__', '_')
        return sanitized

    def add_measurement(self, name, value, timestamp, tag_string=None):
        """
        Add a measurement.

        timestamp should be a timestamp in seconds. may be floating point or
        integer (it is converted to integer nanoseconds, as the line
        protocol requires).
        """
        key = self.sanitize_name(name)
        if tag_string:
            key = key + ',' + tag_string
        nanos = int(timestamp * SEC_TO_NANOSEC)
        self.body.append(key + ' ' + 'value=' + str(value) + ' ' + str(nanos))

    def __str__(self):
        return "\n".join(self.body)
class Relay:
    """
    Sends statistics to InfluxDB.

    Takes an argument array (i.e. sys.argv) and configures itself from that.
    Every `interval` seconds it scrapes the chosen Hadoop component's
    metrics and POSTs them to InfluxDB's /write endpoint.
    """

    # CLI component name -> callable that fetches that component's metrics.
    COMPONENTS = {
        'NodeManager': hadmin.system.rest_nm,
        'ResourceManager': hadmin.system.rest_rm
    }

    def __init__(self, args):
        # Caches for the lazily-resolved properties below.
        self._username = None
        self._password = None
        self._tag_string = None
        self._interval = 10  # default polling period in seconds
        self.args = self.parse_args(args)

    def run(self):
        """Poll forever, writing one batch of measurements per interval.

        Declared to return 0, though the loop never terminates normally.
        """
        print('Sending metrics from ' + self.args.component + ' to ' +
              self.args.influxdb_address)
        print('Using database ' + self.args.database)
        if self.using_auth():
            print('Using username ' + self.username)
        if self.tag_string:
            print('Adding tag string "' + self.tag_string +
                  '" to requests')
        while True:
            body = self.get_request()
            auth = self.get_auth()
            resp = requests.post(self.args.influxdb_address + '/write',
                                 auth=auth,
                                 params={'db': self.args.database},
                                 data=str(body))
            # InfluxDB answers 204 No Content on a successful write.
            if resp.status_code != 204:
                if resp.status_code == 200:
                    print('InfluxDB could not process the request')
                elif resp.status_code == 404:
                    print('Database ' + self.args.database + ' does not exist')
                else:
                    print('Failed to write request (' + str(resp.status_code) +
                          '):')
                    print(str(body))
            time.sleep(self.interval)
        return 0

    def get_request(self):
        """Build a WriteBody holding the component's current metrics,
        all stamped with the same wall-clock time."""
        thing = Relay.COMPONENTS[self.args.component]()
        req = WriteBody()
        t = time.time()
        d = dict(thing)
        for key in d:
            req.add_measurement(key, d[key], t, self.tag_string)
        return req

    def get_auth(self):
        """Return HTTPBasicAuth when credentials were given, else None."""
        if self.using_auth():
            return HTTPBasicAuth(self.username, self.password)
        return None

    def using_auth(self):
        """True when both --username and --password were supplied."""
        if self.username and self.password:
            return True
        return False

    def parse_args(self, args):
        """Parse the CLI arguments (without the program name)."""
        parser = argparse.ArgumentParser(prog='hadmin-stats-influxd',
                                         description='send metrics to influx')

        # Optional args
        parser.add_argument('--tag-string', nargs=1, dest='tag_string')
        parser.add_argument('--interval', nargs=1)
        parser.add_argument('--username', nargs=1)
        parser.add_argument('--password', nargs=1)
        parser.add_argument('influxdb_address')
        parser.add_argument('database')
        parser.add_argument('component',
                            help="one of " +
                            ', '.join(sorted(Relay.COMPONENTS.keys())))
        return parser.parse_args(args)

    @property
    def username(self):
        """InfluxDB username from --username, cached after first access."""
        if self._username:
            return self._username
        if self.args.username:
            self._username = self.args.username[0]
        return self._username

    @property
    def password(self):
        """InfluxDB password from --password, cached after first access."""
        if self._password:
            return self._password
        if self.args.password:
            self._password = self.args.password[0]
        return self._password

    @property
    def interval(self):
        """Polling period in seconds (--interval flag, default 10).

        BUG FIX: this previously returned ``self._tag_string``, and its
        ``if self._interval`` guard (default 10, always truthy) meant the
        --interval flag was never consulted at all. Now the flag, when
        present, overrides the default.
        """
        if self.args.interval:
            self._interval = int(self.args.interval[0])
        return self._interval

    @property
    def tag_string(self):
        """Extra line-protocol tags from --tag-string, cached after first
        access (None when the flag was not given)."""
        if self._tag_string:
            return self._tag_string
        if self.args.tag_string:
            self._tag_string = self.args.tag_string[0]
        return self._tag_string
def run():
    """Console entry point: build a Relay from the CLI args and start it."""
    return Relay(sys.argv[1:]).run()
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import json
from scrape_utils import parse_macro
# Scrape the EU4 wiki's modifier list into eu4_modifiers.json.
with webdriver.Chrome() as driver:
    wait = WebDriverWait(driver, 10)  # NOTE(review): created but never used
    driver.get('https://eu4.paradoxwikis.com/Modifier_list')
    # Rows of every table whose 5th header cell contains "Version".
    rows = driver.find_elements(By.XPATH, '//table[./thead/tr/th[5][contains(text(),"Version")]]/tbody/tr')
    modifiers = {}
    for row in rows:
        cells = row.find_elements(By.TAG_NAME, 'td')
        mod_id = cells[0].text
        example = cells[1].text
        # Example cells look like "some_modifier = <value>"; inspect the
        # value text to guess its type (a '.' implies Float, 'yes' implies
        # Boolean, anything else is treated as Integer).
        parts = example.split(' = ')
        example_value = parts[-1]
        value_type = "Integer"
        if '.' in example_value:
            value_type = "Float"
        elif 'yes' in example_value:
            value_type = "Boolean"
        modifier = {
            'id': mod_id,
            'value_type': value_type,
            'example': example,
            'description': cells[2].text,
            'effect_type': cells[3].text,
            'version_added': cells[4].text,
            'macro': []
        }
        # Project helper; presumably fills 'macro' in place -- confirm in
        # scrape_utils.
        parse_macro(modifier)
        modifiers[mod_id] = modifier
with open("eu4_modifiers.json", "w") as data_file:
    json.dump(modifiers, data_file, indent=4, sort_keys=False)
print(f"Parsed {len(modifiers)} modifiers")
|
import json
'''
Immutable object representing the current gaming status.
'''
class Status:
    """Immutable value object holding the current gaming status flag."""

    def __init__(self, gaming_status):
        # Strict type check: only genuine bools are accepted (no 0/1 ints).
        if not isinstance(gaming_status, bool):
            raise ValueError("Specified gaming status must be of type bool!")
        self.__gaming_status = gaming_status

    def get_gaming_status(self):
        """Return the stored status as a plain bool."""
        return bool(self.__gaming_status)

    def to_json(self):
        """Serialise this object's attribute dict as pretty-printed JSON."""
        return json.dumps(self, default=lambda obj: obj.__dict__,
                          sort_keys=True, indent=4)

    def __str__(self):
        return f"Gaming status: {self.__gaming_status}"
|
# Generated by Django 3.0.5 on 2020-04-24 17:40
from django.db import migrations, models
import paint.validators
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.5): re-declare Purchase.Total_amount as a
    2-decimal-place DecimalField with the paint.validators.nonneg validator
    (presumably enforcing non-negative amounts -- confirm in validators)."""

    dependencies = [
        ('paint', '0015_auto_20200424_2227'),
    ]

    operations = [
        migrations.AlterField(
            model_name='purchase',
            name='Total_amount',
            field=models.DecimalField(decimal_places=2, max_digits=10, validators=[paint.validators.nonneg], verbose_name='Total Amount'),
        ),
    ]
|
from unittest import TestCase
class TestTransaction(TestCase):
    """Placeholder suite for transaction tests; no cases implemented yet."""
|
"""
AVCaesar API class wrapper
"""
import json
import sys
import logging
try:
import requests
except ImportError as err:
print("[!] error, missing %s" % (err))
sys.exit()
class CaesarAPI():
    '''
    API wrapper for AV Caesar
    Docs: https://avcaesar.malware.lu/docs/api
    '''
    def __init__(self, api_key):
        # The API key is sent as an `apikey` cookie on every request.
        self.api_key = api_key
        self.base_url = "https://avcaesar.malware.lu/api/v1/"
        logging.getLogger().setLevel(logging.INFO)

    def get_api_info(self):
        '''
        Name: get_api_info
        Purpose: get info about API usage (quotas) from provider
        Parameters: N/A
        Return: string describing quotas, or an error string.
        '''
        try:
            req = requests.get(self.base_url+"/user/quota", cookies=dict(apikey=self.api_key))
        except requests.exceptions.RequestException as req_err:
            return "[!] Error getting API info from AV Caesar!\n\t %s" % str(req_err)
        if req.status_code == 200:
            logging.info("[*] AVCaesar successfully requested API info endponit.")
            # Parse the response body once; the previous implementation
            # called req.json() six times, re-parsing it for every field.
            quota = req.json()
            return("\n\t[ AV Caesar ]\n\t\t[+] Analysis %s/%s" \
                    "\n\t\t[+] Download: %s/%s\n\t\t[+] Info: %s/%s" %
                   (quota.get('analysis').get('current'),
                    quota.get('analysis').get('limit'),
                    quota.get('download').get('current'),
                    quota.get('download').get('limit'),
                    quota.get('info').get('current'),
                    quota.get('info').get('limit')))
        return "\n[!] Error, A/V Caesar API request for API limits went "\
                "horribly wrong. %s" % str(req.text)

    def latest_submissions(self):
        '''
        Name: latest_submissions
        Purpose: get latest ioc contents.
        Parameters: N/A
        Return: string (this provider has no latest-submissions feed).
        '''
        logging.info("[*] AVCaesar does not provide a latest-submissions feed.")
        return "\t[*] AV Caesar does not support latest submissions."

    def search(self, ioc_val):
        '''
        Name: search
        Purpose: search for information about a particular ioc.
        Parameters: [ioc_val] string value to specify ioc to search for.
        return: string
        '''
        try:
            req = requests.get(self.base_url+"/sample/"+ioc_val, cookies=dict(apikey=self.api_key))
        except requests.exceptions.RequestException as req_err:
            return "[!] Error searching for ioc from AV Caesar!\n\t %s" % str(req_err)
        if req.status_code == 200:
            logging.debug("Downloading ioc %s", str(ioc_val))
            try:
                logging.info("Identified ioc %s", str(ioc_val))
                return "\t[AV Caesar]\n"+json.dumps(req.json(), indent=4)
            except json.decoder.JSONDecodeError:
                # If something is searched out and doesn't return JSON or
                # malformed, print the plain text.
                if len(req.text) == 0:
                    return "[!] Error, HTTP request succeeded, but no content"\
                           " is available."
                return req.text
        elif req.status_code == 429:
            return "\t[!] Error, too many requests being made against AV Caesar."
        else:
            return "\t[AV Caesar] Hash not found."

    def download_sample(self, ioc_value, directory):
        '''
        Name: download_sample
        Purpose: Download a ioc from an API provider and writes sample
                 byte stream to a file of the ioc name or user provided name.
        Param:
            [ioc_value] string value indicatin hash (sha{128,256,512}/md5) to
            search for.
            [directory] string path the sample is written under.
        Return:
            [boolean] True if file downloaded successfully.
                      False if error occurs.
            (A string is returned instead when the HTTP request itself fails.)
        '''
        try:
            req = requests.get(self.base_url+"/sample/"+ioc_value+"/download",
                               cookies=dict(apikey=self.api_key))
        except requests.exceptions.RequestException as req_err:
            return "[!] Error downloading sample from AV Caesar!\n\t %s" % str(req_err)
        if req.status_code == 200:
            logging.info("[*] AVCaesar successfully downloaded sample %s.", str(ioc_value))
            # NOTE(review): assumes `directory` ends with a path separator;
            # confirm callers, otherwise the name fuses onto the dir name.
            try:
                with open(directory + ioc_value, "wb+") as fout:
                    fout.write(req.content)
                return True
            except IOError as err:
                print("\t[!] Error writing to file.\n\t%s" % str(err))
        else:
            print("\t[!] Failed to identify ioc %s.\n\t[ERROR] %s"
                  % (ioc_value, req.status_code))
        return False
|
"""
Test all getters and setters in the Character class
Includes all methods that set and retrieve data
"""
import npc
from npc.character import Character
import pytest
class TestTypeKey:
    """Tests for Character.type_key derivation from the type tag."""

    def test_casing(self):
        """Type key should always be lower case"""
        character = Character(type=['Fish', 'Great Ape'])
        assert character.type_key == 'fish'

    def test_empty(self):
        """A character with no type values has no type key."""
        character = Character()
        assert character.type_key is None
class TestLocations:
    """Character.locations merges the 'location' and 'foreign' tags."""

    def test_foreign(self):
        character = Character()
        character.tags('foreign').append('Mars')
        assert 'Mars' in character.locations

    def test_location(self):
        character = Character()
        character.tags('location').append('Mars')
        assert 'Mars' in character.locations

    def test_both(self):
        """Entries from both tags appear in the combined view."""
        character = Character()
        character.tags('location').append('Mars')
        character.tags('foreign').append('Mercury')
        assert 'Mars' in character.locations
        assert 'Mercury' in character.locations

    def test_removes_empties(self):
        """Blank tag values are dropped from the combined list."""
        character = Character()
        character.tags('location').append('Mars')
        character.tags('foreign').append('')
        assert len(list(character.locations)) == 1
class TestHasLocations:
    """Character.has_locations reflects the 'location'/'foreign' tags."""

    def test_foreign(self):
        character = Character()
        character.tags('foreign').append('Mars')
        assert character.has_locations

    def test_location(self):
        character = Character()
        character.tags('location').append('Mars')
        assert character.has_locations

    def test_both(self):
        character = Character()
        character.tags('location').append('Mars')
        character.tags('foreign').append('Mercury')
        assert character.has_locations

    def test_empty(self):
        """A blank tag value does not count as a location."""
        character = Character()
        character.tags('foreign').append('')
        assert not character.has_locations
|
# Generated by Django 2.0.4 on 2018-06-12 01:18
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Auto-generated by Django 2.0.4 (2018-06-12): redefines
    ``product.size`` as a MultiSelectField offering even sizes 32-44.
    Do not edit by hand; generate a new migration instead."""
    dependencies = [
        ('core', '0004_auto_20180410_1101'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='size',
            field=multiselectfield.db.fields.MultiSelectField(choices=[('32', '32'), ('34', '34'), ('36', '36'), ('38', '38'), ('40', '40'), ('42', '42'), ('44', '44')], max_length=20),
        ),
    ]
|
import numpy as np
from scipy import linalg
from pressio4py import logger, solvers, ode
class MySys1:
    """Toy 5x5 system with a fixed residual [0..4] and a row-major
    Jacobian filled with 0..24, used to exercise the solver callbacks."""

    def createResidual(self):
        return np.zeros(5)

    def createJacobian(self):
        return np.zeros((5, 5))

    def residual(self, stateIn, R):
        # R = [0, 1, 2, 3, 4] regardless of the state.
        R[:] = np.arange(5.)

    def jacobian(self, stateIn, J):
        # J holds 0..24 in row-major order, regardless of the state.
        J[:] = np.arange(25.).reshape(5, 5)
class MyLinSolver1:
    """Fake linear solver: instead of solving, it asserts that A and b
    match the values MySys1 is expected to produce."""

    def __init__(self):
        self.callCount_ = 0

    def solve(self, A, b, x):
        print("\n Python Lin solver")
        expected_A = np.arange(25.).reshape(5, 5)
        expected_b = np.arange(5.)
        assert np.allclose(A, expected_A)
        assert np.allclose(b, expected_b)
        print(A)
        print(b)
def test_newton_raphson_1():
    """Drive two fixed newton-raphson iterations on MySys1; the fake
    linear solver validates the A/b passed to it."""
    logger.initialize(logger.logto.terminal)
    logger.setVerbosity([logger.loglevel.debug])
    x0 = np.ones(5)
    system = MySys1()
    lin_solver = MyLinSolver1()
    nr_solver = solvers.create_newton_raphson(system, x0, lin_solver)
    nr_solver.setUpdatingCriterion(solvers.update.Standard)
    nr_solver.setMaxIterations(2)
    nr_solver.setStoppingCriterion(solvers.stop.AfterMaxIters)
    nr_solver.solve(system, x0)
    print(x0)
    logger.finalize()
class MySys2:
    """2D nonlinear system x^3 + y = 1, y^3 - x = -1 with root (1, 0)."""

    def createResidual(self):
        return np.zeros(2)

    def createJacobian(self):
        return np.zeros((2, 2))

    def residual(self, x, R):
        u, v = x[0], x[1]
        R[0] = u ** 3 + v - 1.0
        R[1] = v ** 3 - u + 1.0

    def jacobian(self, x, J):
        # Analytic Jacobian of the residual above.
        J[0, 0] = 3.0 * x[0] ** 2
        J[0, 1] = 1.0
        J[1, 0] = -1.0
        J[1, 1] = 3.0 * x[1] ** 2
class MyLinSolver2:
    """Linear solver callback: solves A x = b via an LU factorization
    (scipy wrappers over LAPACK getrf/getrs)."""

    def solve(self, A, b, x):
        print("\n Python Lin solver")
        lu_and_piv = linalg.lu_factor(A, overwrite_a=False)
        x[:] = linalg.lu_solve(lu_and_piv, b)
def test_newton_raphson_2():
    """Solve the 2x2 nonlinear system MySys2 with newton-raphson and
    verify convergence to the root (1, 0)."""
    print("\n")
    logger.initialize(logger.logto.terminal)
    logger.setVerbosity([logger.loglevel.debug])
    state = np.array([0.001, 0.0001])
    sys = MySys2()
    lsO = MyLinSolver2()
    nlsO = solvers.create_newton_raphson(sys, state, lsO)
    # Default updating/stopping criteria are used here.
    nlsO.solve(sys, state)
    gold = np.array([1., 0.])
    # Bug fix: np.allclose() was called but its boolean result was
    # discarded, so the test could never fail; assert it instead.
    assert np.allclose(gold, state)
    print(state)
    logger.finalize()
|
from flask import render_template, redirect, url_for, flash
from flask_login import login_required, current_user
from flask_admin.contrib.sqla import ModelView
from . forms import PostForm, CommentForm, SubscribersForm
from .import main
from .. import db, basic_auth
import markdown2
from .email import mail_message
from ..models import User, Post, Role, Comment, Subscribers
from datetime import datetime
@main.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: list every post (newest first) and handle the
    newsletter subscription form.

    Subscription is best-effort: any ordinary failure (duplicate email,
    mail error) falls back to a plain redirect so the page never 500s.
    """
    title = "Blog On | Home "
    # NOTE(review): order_by('-id') passes a textual clause to
    # SQLAlchemy -- confirm it really sorts descending on this version.
    posts = Post.query.order_by('-id').all()
    print(f'blogs {posts}')
    subscribers = SubscribersForm()
    try:
        if subscribers.validate_on_submit():
            subscriber = Subscribers(email=subscribers.email.data)
            db.session.add(subscriber)
            db.session.commit()
            flash('You are now subscribed!')
            mail_message("Welcome to Blog On", "email/welcome",
                         subscriber.email, subscriber=subscriber)
            print("sent")
            return redirect(url_for('main.index'))
    except Exception:
        # Bug fix: was a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt. Keep the best-effort redirect,
        # but only for ordinary errors.
        return redirect(url_for('main.index'))
    return render_template('index.html', title=title, posts=posts, subscribers=subscribers)
@main.route('/profile/<username>')
@login_required
def profile(username):
    """Render the profile page for *username* (404 if unknown)."""
    profile_owner = User.query.filter_by(username=username).first_or_404()
    return render_template('profile.html', user=profile_owner, title="Profile")
@main.route('/post', methods=['GET', 'POST'])
def post():
    """Post-creation page: show all posts (newest first) and handle
    both the new-post form and the new-comment form."""
    posts = Post.query.all()
    posts.reverse()
    print(posts)
    comment_form = CommentForm()
    if comment_form.validate_on_submit():
        new_comment = Comment(comment=comment_form.comment.data,
                              commenter=comment_form.commenter.data)
        db.session.add(new_comment)
        db.session.commit()
        print(new_comment)
        return redirect(url_for('main.post'))
    allcomments = Comment.query.all()
    title = "Post Article"
    blog_form = PostForm()
    if blog_form.validate_on_submit():
        new_post = Post(title=blog_form.title.data, post=blog_form.Entry.data,
                        user_id=current_user.id, timeposted=datetime.utcnow())
        db.session.add(new_post)
        db.session.commit()
        return redirect(url_for('main.post'))
    return render_template('post.html', Post=blog_form, title=title, posts=posts,
                           comment=comment_form, allcomments=allcomments)
@main.route('/post/<id>', methods=['POST', 'GET'])
def fullpost(id):
    """Single-post page: show one post with its comments and accept a
    new comment for it."""
    title = 'Posts'
    selected_post = Post.query.filter_by(id=id).first()
    comment_form = CommentForm()
    if comment_form.validate_on_submit():
        new_comment = Comment(comment=comment_form.comment.data,
                              post_id=id, commenter=comment_form.commenter.data)
        db.session.add(new_comment)
        db.session.commit()
        print(new_comment)
        return redirect(url_for('main.fullpost', id=selected_post.id))
    allcomments = Comment.query.all()
    postcomments = Comment.query.filter_by(post_id=id).all()
    return render_template('fullpost.html', title=title, post=selected_post,
                           comment=comment_form, allcomments=allcomments,
                           postcomments=postcomments)
@main.route('/<int:id>/delete', methods=['POST'])
@login_required
def delete(id):
    """Delete the post with primary key *id*, then return to the index.

    Bug fix: the previous body called undefined helpers (`get_post`,
    `get_db`) left over from the sqlite3 tutorial pattern, and its SQL
    string had the parameter tuple *inside* the literal
    ('DELETE FROM post WHERE id = ?, (id,)'), so the route could never
    work. Rewritten with the SQLAlchemy session used throughout this
    module.
    """
    doomed = Post.query.get_or_404(id)
    # NOTE(review): comments referencing this post are not removed here;
    # confirm the model declares a cascade if orphans matter.
    db.session.delete(doomed)
    db.session.commit()
    return redirect(url_for('main.index'))
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import Client, TestCase
from ..models import Group
class TestGroupsViews(TestCase):
    """
    TestCase class to test the groups views.

    setUp builds: a logged-in admin/subscriber client, a second
    logged-in client (whose user gets banned from `group`), an
    anonymous client, a user earmarked for banning, and two groups.
    NOTE(review): every actual test method below is commented out, so
    this class currently runs no assertions.
    """
    def setUp(self):
        # This client will be logged in, admin & subscriber of the `group`.
        self.client = Client()
        self.user = get_user_model().objects.create_user(
            username='test_user',
            email='test@gmail.com',
            password='top_secret'
        )
        self.client.login(username='test_user', password='top_secret')
        # Another logged in client.
        self.other_client = Client()
        self.other_user = get_user_model().objects.create_user(
            username='other_test_user',
            email='other_test@gmail.com',
            password='top_secret'
        )
        self.other_client.login(
            username='other_test_user', password='top_secret')
        # Anonymous client.
        self.anonymous_client = Client()
        # This user will be banned in the `group`.
        self.user_to_ban = get_user_model().objects.create_user(
            username='user_to_ban',
            email='user_to_ban@gmail.com',
            password='top_secret'
        )
        self.group = Group.objects.create(
            title='test title 1',
            description='some random words'
        )
        # Make `user` the admin & subscriber.
        self.group.admins.add(self.user)
        self.group.subscribers.add(self.user)
        # Ban `other_user` from group.
        self.group.banned_users.add(self.other_user)
        # A second group with no members, for list/count comparisons.
        self.other_group = Group.objects.create(
            title='test title 2',
            description='some random words'
        )
# def test_groups_page_view(self):
# """Test groups list view."""
# response = self.client.get(reverse('view_all_groups'))
# self.assertEqual(response.status_code, 200)
# self.assertTrue('groups' in response.context.keys())
# self.assertTrue('test title 1' in str(response.context['groups']))
#
# def test_banned_users_list(self):
# """Test banned users list view."""
# url = reverse('banned_users', kwargs={'group': self.group.slug})
# # When admin requests the list.
# response = self.client.get(url)
# self.assertEqual(response.status_code, 200)
# self.assertTrue('users' in response.context.keys())
# self.assertEqual(len(response.context['users']), 1)
# # When anonymous user requests the list.
# other_response = self.anonymous_client.get(url)
# self.assertRedirects(other_response, other_response.url, status_code=302)
#
# def test_ban_user_view(self):
# """Test ban user view functionality."""
# response = self.client.get(
# reverse('ban_user', kwargs={'group': self.group.slug,
# 'user_id': self.user_to_ban.id}))
# self.assertRedirects(response,
# reverse('banned_users', kwargs={'group': self.group.slug}), status_code=302)
#
# def test_user_subscription_list_view(self):
# """Test the users subscriptions list."""
# response = self.client.get(
# reverse('user_subscription_list', kwargs={'username': self.user.username}))
# self.assertEqual(response.status_code, 200)
# self.assertTrue('subscriptions' in response.context.keys())
# self.assertEqual(len(response.context['subscriptions']), 1)
# self.assertTrue('test title 1' in str(response.context['subscriptions']))
#
# def test_user_created_groups_page_view(self):
# """Test groups list created by certain user."""
# response = self.client.get(
# reverse('user_created_groups', kwargs={'username': self.user.username}))
# self.assertEqual(response.status_code, 200)
# self.assertTrue('user_groups' in response.context.keys())
# self.assertEqual(len(response.context['user_groups']), 1)
# self.assertTrue('test title 1' in str(response.context['user_groups']))
#
# def test_group_page_view(self):
# """Test group page view."""
# # logged in client
# response = self.client.get(
# reverse('group', kwargs={'group': self.group.slug}))
# self.assertEqual(response.status_code, 200)
# self.assertTrue('news' in response.context.keys())
# self.assertEqual(len(response.context['news']), 0)
# # anonymous client
# other_response = self.anonymous_client.get(
# reverse('group', kwargs={'group': self.group.slug}))
# self.assertEqual(response.status_code, 200)
#
# def test_create_group_view(self):
# """Test the creation of groups."""
# # Interent connection is required to make this test pass.
# current_groups_count = Group.objects.count()
# response = self.client.post(reverse('new_group'),
# {'title': 'Not much of a title',
# 'description': 'babla', })
# self.assertEqual(response.status_code, 302)
# new_group = Group.objects.get(title='Not much of a title')
# self.assertEqual(new_group.title, 'Not much of a title')
# self.assertEqual(Group.objects.count(),
# current_groups_count + 1)
#
# def test_subscribe_group(self):
# """Test the subscribed ajax call & response."""
# response = self.other_client.get(reverse('subscribe', kwargs={'group': self.group.slug}),
# HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# # `other_user` is banned in previous test so it'll raise PermissionDenied.
# self.assertEqual(response.status_code, 403)
# self.assertEqual(self.group.subscribers.count(), 1)
#
# def test_edit_group_cover_view(self):
# """Test if non admin can edit group cover."""
# response = self.other_client.get(reverse('edit_group_cover', kwargs={'group': self.group.slug}))
# self.assertEqual(response.status_code, 403)
#
# def test_group_view_success_status_code(self):
# """Test group detail view with right url."""
# response = self.client.get(reverse('group', kwargs={'group': self.group.slug}))
# self.assertEqual(response.status_code, 200)
#
# def test_group_view_not_found_status_code(self):
# """Test group detail view with wrong url."""
# response = self.client.get(reverse('group', kwargs={'group': 'does-not-exists'}))
# self.assertEqual(response.status_code, 404)
|
from soad import AsymmetricData as asyd
import matplotlib.pyplot as plt
# This script calculates the overlap between the PDFs (Probability
# Density Functions) of two asymmetric variables and plots both curves.
# Arguments appear to be (central value, lower error, upper error) with
# N sample points -- TODO confirm against soad's AsymmetricData API.
a = asyd(20.0, 1.6007810593582121, 1.6007810593582121,N=50000)
b = asyd(20.27602675930529, 1.521759265471423, 1.916585620152389,N=50000)
plt.plot(a.x_values,a.pdf_values, color="deepskyblue")
plt.plot(b.x_values,b.pdf_values, color="tomato")
# NOTE(review): the `ymin` keyword was removed in matplotlib 3.0;
# newer versions require plt.ylim(bottom=0).
plt.ylim(ymin=0)
plt.xlabel("x")
plt.ylabel("prob")
plt.savefig("diff.png", dpi=100)
def find_match_percent(a, b):
    """Return the overlapping area under the PDFs of *a* and *b*.

    a, b: objects exposing ``x_limits``, ``N``, ``x_values`` and
    ``pdf()`` (e.g. soad AsymmetricData). The overlap is a Riemann sum
    over a's sampling grid of min(pdf_a, pdf_b); 1.0 means identical
    distributions, 0.0 means disjoint support.

    Fix: the accumulator previously shadowed the builtin ``sum``; the
    if/else branch picking the smaller density is now an explicit
    ``min``.
    """
    overlap = 0.0
    # Grid spacing of a's sample points (assumes uniform sampling of
    # [x_limits[0], x_limits[1]] with N points -- TODO confirm).
    dx = (a.x_limits[1] - a.x_limits[0]) / float(a.N)
    for xi in a.x_values:
        overlap += dx * min(a.pdf(xi), b.pdf(xi))
    return overlap
# Report the shared area under both PDFs (1.0 == identical curves).
area = find_match_percent(a, b)
print("Matching area ratio is: {:.3}".format(area))
plt.show()
|
import random
import uuid
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.contrib.messages.storage.fallback import FallbackStorage
from ..views import view_basket, add_to_basket
from ..models import Basket, BasketItem
from products.models import Product
class ViewBasketTest(TestCase):
    """Test the GET view for ViewBasket: empty/filled rendering, badge
    counts, subtotals/totals, and basket clearing on logout."""
    @classmethod
    def setUpTestData(cls):
        # add test products
        number_of_products = 5
        for product_number in range(number_of_products):
            title = f'Doggie Treats {product_number}'
            brand = 'Pawfect'
            category = 'Dog'
            price = round(random.uniform(0, 50), 2)
            description = 'Doggie Treats'
            stock = round(random.uniform(0, 100), 2)
            is_live = True
            product_details = {
                'title': title,
                'brand': brand,
                'category': category,
                'price': price,
                'stock': stock,
                'description': description,
                # NOTE(review): this open() is never closed; a `with`
                # block reading the bytes first would avoid the leak.
                'image': SimpleUploadedFile(
                    name='image.jpg',
                    content=open(settings.BASE_DIR +
                                 '/test/image.jpg', 'rb').read(),
                    content_type='image/jpeg'
                ),
                'is_live': is_live
            }
            Product.objects.create(**product_details)
    def setUp(self):
        self.manual_url = '/basket/'
        self.reverse_url = reverse('basket')
        self.factory = RequestFactory()
        # create basket (anonymous: no user attached)
        self.basket = Basket.objects.create(user=None)
        # set product variables
        self.product1 = Product.objects.get(title='Doggie Treats 1')
        self.product2 = Product.objects.get(title='Doggie Treats 2')
    def test_view_url_exists(self):
        """Check that the hardcoded page exists"""
        response = self.client.get(self.manual_url)
        self.assertEqual(response.status_code, 200)
    def test_view_reverse_url(self):
        """Check that the named view page exists"""
        response = self.client.get(self.reverse_url)
        self.assertEqual(response.status_code, 200)
    def test_basket_without_items(self):
        """check output when basket is empty"""
        response = self.client.get(self.reverse_url)
        self.assertContains(response, 'You do not have any items in your '
                                      'basket')
    def test_basket_with_items(self):
        """Add items to basket and check that they are displayed on basket
        view"""
        quantity = 3
        expected_total = self.product1.price * quantity
        # add items to basket
        BasketItem.objects.create(
            basket=self.basket, product=self.product1, quantity=quantity)
        # create request
        request = self.factory.get(self.reverse_url)
        # add basket object to request, required by view
        request.basket = self.basket
        response = view_basket(request)
        # perform tests
        self.assertContains(response, 'Shopping Basket')
        self.assertContains(response, self.product1.title)
        self.assertContains(response, self.product1.price)
        self.assertContains(response, expected_total)
    def test_basket_icon_badge_reflects_basket_quantity(self):
        """Test that the basket badge in the navbar always reflects the
        quantity of items in the basket"""
        quantity1 = 2
        quantity2 = 5
        # add items to basket
        BasketItem.objects.create(
            basket=self.basket, product=self.product1, quantity=quantity1)
        BasketItem.objects.create(
            basket=self.basket, product=self.product2, quantity=quantity2)
        total_quantity = quantity1 + quantity2
        # create request
        request = self.factory.get(self.reverse_url)
        # add basket object to request, required by view
        request.basket = self.basket
        response = view_basket(request)
        # perform tests: badge markup must carry the summed quantity
        self.assertContains(response, '<span class="badge badge-pill '
                                      f'badge-warning cart-badge">{total_quantity}'
                                      '</span>')
    def test_update_basket(self):
        """Update quantity of items in basket"""
        quantity = 3
        BasketItem.objects.create(
            basket=self.basket, product=self.product1, quantity=quantity)
        # create request
        request = self.factory.get(self.reverse_url)
        # add basket object to request, required by view
        request.basket = self.basket
        response = view_basket(request)
        # perform initial test
        self.assertContains(response,
                            f'There are {quantity} item(s) in your basket.')
        # update quantity and re-run test
        updated_quantity = 4
        BasketItem.objects.all().update(quantity=updated_quantity)
        response = view_basket(request)
        # verify the view reflects the updated quantity
        self.assertContains(response,
                            f'There are {updated_quantity} item(s) '
                            'in your basket.')
    def test_item_subtotal(self):
        """Check item subtotals output the correct amount"""
        # set product variables
        quantity1 = 6
        quantity2 = 3
        # add items to basket
        item1 = BasketItem.objects.create(
            basket=self.basket, product=self.product1, quantity=quantity1)
        item2 = BasketItem.objects.create(
            basket=self.basket, product=self.product2, quantity=quantity2)
        # create request
        request = self.factory.get(self.reverse_url)
        # add basket object to request, required by view
        request.basket = self.basket
        response = view_basket(request)
        subtotal1 = self.product1.price * quantity1
        subtotal2 = self.product2.price * quantity2
        # model-level subtotals
        self.assertEqual(item1.subtotal(), subtotal1)
        self.assertEqual(item2.subtotal(), subtotal2)
        # check basket for subtotal values
        self.assertContains(response, subtotal1)
        self.assertContains(response, subtotal2)
    def test_basket_total(self):
        """Check basket total outputs the correct amount"""
        # set product variables
        quantity1 = 6
        quantity2 = 3
        # add items to basket
        BasketItem.objects.create(
            basket=self.basket, product=self.product1, quantity=quantity1)
        BasketItem.objects.create(
            basket=self.basket, product=self.product2, quantity=quantity2)
        # create request
        request = self.factory.get(self.reverse_url)
        # add basket object to request, required by view
        request.basket = self.basket
        response = view_basket(request)
        subtotal1 = self.product1.price * quantity1
        subtotal2 = self.product2.price * quantity2
        total = subtotal1 + subtotal2
        # run tests
        self.assertEqual(self.basket.total(), total)
        self.assertContains(response, total)
    def test_basket_is_emptied_upon_logout(self):
        """Test that basket is cleared when a user logs out of account"""
        email = 'test@user.com'
        password = 'pass1234'
        user = get_user_model().objects.create_user(
            username=email,
            email=email,
            password=password)
        user_basket = Basket.objects.create(user=user)
        quantity1 = 1
        quantity2 = 2
        # add items to basket
        BasketItem.objects.create(
            basket=user_basket, product=self.product1, quantity=quantity1)
        BasketItem.objects.create(
            basket=user_basket, product=self.product2, quantity=quantity2)
        # log in to user account (returns boolean)
        logged_in = self.client.login(email=email, password=password)
        # make sure the user logged in successfully
        self.assertTrue(logged_in)
        # goto basket and check that the items are there
        response = self.client.get(self.reverse_url)
        self.assertContains(response, self.product1.title)
        self.assertContains(response, self.product2.title)
        # log out of account
        self.client.logout()
        # load basket view and check contents
        response = self.client.get(self.reverse_url)
        # make sure user is no longer logged in
        self.assertTrue(response.context['user'].is_authenticated is False)
        # now check basket contents
        self.assertNotContains(response, self.product1.title)
        self.assertNotContains(response, self.product2.title)
        self.assertContains(
            response, 'You do not have any items in your basket.')
class AddToBasketTest(TestCase):
    """Test how products are added to the basket using add_to_basket view"""
    @classmethod
    def setUpTestData(cls):
        # add test products
        number_of_products = 10
        for product_number in range(number_of_products):
            title = f'Doggie Treats {product_number}'
            brand = 'Pawfect'
            category = 'Dog'
            price = round(random.uniform(0, 50), 2)
            description = 'Doggie Treats'
            stock = round(random.uniform(0, 100), 2)
            is_live = True
            product_details = {
                'title': title,
                'brand': brand,
                'category': category,
                'price': price,
                'stock': stock,
                'description': description,
                # NOTE(review): this open() is never closed; a `with`
                # block reading the bytes first would avoid the leak.
                'image': SimpleUploadedFile(
                    name='image.jpg',
                    content=open(settings.BASE_DIR +
                                 '/test/image.jpg', 'rb').read(),
                    content_type='image/jpeg'
                ),
                'is_live': is_live
            }
            Product.objects.create(**product_details)
    def setUp(self):
        """Create a fresh anonymous basket and a request factory."""
        # create products
        self.basket_url = reverse('basket')
        self.factory = RequestFactory()
        # create basket
        self.basket = Basket.objects.create(user=None)
    def test_add_valid_product(self):
        """Check that adding product with a valid product uuid, results in
        product being added to the basket view"""
        product = Product.objects.get(title='Doggie Treats 9')
        url = reverse('add_to_basket', kwargs={'product_id': product.id})
        # create request
        request = self.factory.get(url)
        # add middleware to request (session + messages are normally
        # provided by middleware, absent when using RequestFactory)
        request.session = {}
        request.session['basket_id'] = self.basket.id
        request._messages = FallbackStorage(request)
        # add basket object to request, required by view
        request.basket = self.basket
        # process view
        response = add_to_basket(request, product.id)
        # check that response returns redirect to basket
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/basket/')
        # given add_to_basket results in a redirect, which can not be handled
        # by RequestFactory, basket view is manually called and tests run
        # against this view
        basket_response = view_basket(request)
        # test that product appears on basket view
        self.assertContains(basket_response, 'Shopping Basket')
        self.assertContains(basket_response, product.title)
    def test_add_invalid_product(self):
        """Check that adding product with an invalid product uuid results in
        an error being raised"""
        # create a random uuid
        product_id = uuid.uuid4()
        url = reverse('add_to_basket', kwargs={'product_id': product_id})
        # create request
        response = self.client.get(url)
        # product id is invalid so view should return 404 not found
        self.assertEqual(response.status_code, 404)
    def test_add_product_with_invalid_uuid(self):
        """Test that if user provides an invalid uuid, an error is raised"""
        # create a malformed (non-uuid) id; URL is hand-built because
        # reverse() would reject it
        product_id = 'abcd2123'
        url = '/basket/add/' + product_id + '/'
        # create request
        response = self.client.get(url)
        # product id is invalid so view should return 404 not found
        self.assertEqual(response.status_code, 404)
    def test_add_item_exceed_maximum_quantity(self):
        """Ensure that a basket item cannot exceed maximum permitted amount"""
        # store product details
        product = Product.objects.get(title='Doggie Treats 9')
        # start one below the maximum (the permitted max appears to be
        # 5 -- TODO confirm against the BasketItem model/view)
        quantity = 4
        # add item to basket
        BasketItem.objects.create(
            basket=self.basket, product=product, quantity=quantity)
        # now that basket has item at maximum permitted quantity, try to add
        # product (which increases quantity by +1) - this should NOT result in
        # a quantity increase
        url = reverse('add_to_basket', kwargs={'product_id': product.id})
        # create request
        request = self.factory.get(url)
        # add middleware to request
        request.session = {}
        request.session['basket_id'] = self.basket.id
        request._messages = FallbackStorage(request)
        # add basket object to request, required by view
        request.basket = self.basket
        # before running the add_to_basket view, check quantity in basket
        basket_response = view_basket(request)
        self.assertContains(basket_response, f'There are {quantity} item(s)'
                                             ' in your basket')
        # run the view
        # item is already in basket, therefore, quantity will be increased
        # BUT only if it does not exceed max quantity
        add_to_basket(request, product.id)  # results in +1 quantity
        # load basket and check quantity
        basket_response = view_basket(request)
        self.assertContains(basket_response, f'There are {quantity + 1} '
                                             'item(s) in your basket')
        # run view again - this time quantity should not change
        # because max quantity has been reached
        add_to_basket(request, product.id)  # results in +1 quantity
        # load basket and check quantity
        basket_response = view_basket(request)
        self.assertContains(basket_response, f'There are {quantity + 1} '
                                             'item(s) in your basket')
class GetBasketTest(TestCase):
    """Test view for merging basket, if anonymous user logs in to
    account/restoring user's basket.

    Fixes: test_merge_users_basket previously asserted product2's title
    twice, so product1 was never checked; the fixture image file handle
    is now closed via a context manager.
    """
    @classmethod
    def setUpTestData(cls):
        # add test products
        number_of_products = 5
        # Read the fixture image once, with the handle properly closed
        # (previously open(...).read() leaked the file handle per product).
        with open(settings.BASE_DIR + '/test/image.jpg', 'rb') as img:
            image_content = img.read()
        for product_number in range(number_of_products):
            title = f'Doggie Treats {product_number}'
            brand = 'Pawfect'
            category = 'Dog'
            price = round(random.uniform(0, 50), 2)
            description = 'Doggie Treats'
            stock = round(random.uniform(0, 100), 2)
            is_live = True
            product_details = {
                'title': title,
                'brand': brand,
                'category': category,
                'price': price,
                'stock': stock,
                'description': description,
                'image': SimpleUploadedFile(
                    name='image.jpg',
                    content=image_content,
                    content_type='image/jpeg'
                ),
                'is_live': is_live
            }
            Product.objects.create(**product_details)
    def setUp(self):
        """Create a user with a saved basket holding two products."""
        self.basket_url = reverse('basket')
        self.email = 'test@test.com'
        self.password = 'test1234'
        self.user = get_user_model().objects.create_user(
            username=self.email,
            email=self.email,
            password=self.password)
        # create a basket for the user
        self.basket = Basket.objects.create(user=self.user)
        # get products for adding to user basket
        self.product1 = Product.objects.get(title='Doggie Treats 1')
        self.product2 = Product.objects.get(title='Doggie Treats 2')
        self.quantity1 = 4
        self.quantity2 = 1
        # add items to the user's basket
        self.item1 = BasketItem.objects.create(
            basket=self.basket, product=self.product1, quantity=self.quantity1)
        self.item2 = BasketItem.objects.create(
            basket=self.basket, product=self.product2, quantity=self.quantity2)
    def test_restore_users_basket(self):
        """Test that user's stored basket is restored when they log in"""
        # user is not logged in - anonymous user
        # check contents of basket, prior to log-in (expect to be empty)
        response = self.client.get(self.basket_url)
        self.assertContains(response, 'You do not have any items in '
                                      'your basket')
        # log in to user account and check basket contents match those above
        self.client.force_login(self.user)
        # setup test variables
        total = self.quantity1 + self.quantity2
        # goto basket and check contents
        response = self.client.get(self.basket_url)
        # check each product appears in user's basket
        self.assertContains(response, self.product1.title)
        self.assertContains(response, self.product2.title)
        # check model quantities against test quantities
        self.assertContains(response, f'There are {total} '
                                      'item(s) in your basket.')
        self.assertEqual(total, self.basket.count())
    def test_merge_users_basket(self):
        """Test that contents of anonymous basket are merged into user's stored
        basket upon account login (uses user_logged_in signal)"""
        # get product object
        new_product = Product.objects.get(title='Doggie Treats 3')
        # user is currently anonymous - add items to anonymous user's basket
        # via add_to_basket view, this will redirect user to basket view
        response = self.client.get(
            reverse('add_to_basket', kwargs={'product_id': new_product.id}),
            follow=True)
        self.assertContains(response, 'Shopping Basket')
        self.assertContains(response, new_product.title)
        # by default add_to_basket view sets quantity to 1
        self.assertContains(response, 'There are 1 item(s) in your basket.')
        # log in to user account - expect basket above to merge with the basket
        # created in setUp() method
        login_details = {
            'login': self.email,
            'password': self.password
        }
        login_response = self.client.post(
            '/accounts/login/', login_details, follow=True)
        # make sure the user logged in successfully
        self.assertTrue(login_response.context['user'].is_authenticated)
        # goto basket and check contents
        response = self.client.get(self.basket_url)
        # setup test variables
        new_product_quantity = 1
        total = new_product_quantity + self.quantity1 + self.quantity2
        # run tests
        # product added before login
        self.assertContains(response, new_product.title)
        # products that already existed in the user's saved basket
        # (bug fix: product2 was previously checked twice and product1
        # never checked)
        self.assertContains(response, self.product1.title)
        self.assertContains(response, self.product2.title)
        # check model quantities against test quantities
        self.assertContains(response, f'There are {total} '
                                      'item(s) in your basket.')
        self.assertEqual(total, self.basket.count())
|
#!/usr/bin/python3
import unicodedata
FILENAME = 'xwordlist.dict'
def strip_accents(s):
    """Return *s* with combining accent marks removed."""
    # NFD splits each accented character into a base character followed
    # by combining marks (category 'Mn'), which we then drop.
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
def read():
    """Parse FILENAME into a list of ``{'word', 'score'}`` dicts.

    Each line is expected to be ``word;score``. Words are
    whitespace-stripped, accent-stripped, upper-cased and reduced to
    alphanumerics; malformed lines and duplicate (normalized) words are
    skipped.

    Fixes: the bare ``except:`` around the split is narrowed to
    ValueError; the dead ``if score is None`` check (str.split never
    yields None, so an empty score crashed ``int('')``) is replaced
    with real empty/non-numeric score handling.
    """
    parsed = []
    used_words = set()
    with open(FILENAME, 'r') as f:
        for line in f:
            # Remove leading and trailing whitespace
            line = line.strip()
            # Split out the word and score; skip lines that are not
            # exactly 'word;score'
            try:
                word, score = line.split(';')
            except ValueError:
                continue
            # Don't bother if we don't have a word
            if not word:
                continue
            ## Normalize the word ##
            # Strip extraneous whitespace and accents, upper-case,
            # then drop any non-alphanumeric characters
            word = strip_accents(word.strip()).upper()
            word = ''.join(c for c in word if c.isalnum())
            # Normalization may have removed everything
            if not word:
                continue
            # Don't bother if we don't have a usable score
            score = score.strip()
            if not score:
                continue
            try:
                score = int(score)
            except ValueError:
                continue
            # Don't use words more than once
            if word in used_words:
                continue
            used_words.add(word)
            # Add this word to our collection
            parsed.append({"word": word, "score": score})
    return parsed
def sort(words):
    """Return a new list of word entries, alphabetized by word."""
    return sorted(words, key=lambda entry: entry['word'])
def write(sorted_words):
    """Write *sorted_words* back to FILENAME as ``word;score`` lines and
    report how many were written.

    Fix: the summary previously printed ``len(words)`` -- a global that
    only exists when the module runs as a script -- instead of the
    actual argument.
    """
    with open(FILENAME, 'w') as f:
        for w in sorted_words:
            word, score = w['word'], w['score']
            f.write(f'{word};{score}\n')
    print(f"Successfully sorted dictionary with {len(sorted_words)} words!")
if __name__ == '__main__':
    words = sort(read())
    # Double-check the list is roughly as long as expected
    # NOTE(review): `assert` is stripped under `python -O`; raising
    # SystemExit explicitly would make this guard unconditional.
    assert len(words) > 425000, f"Word list is too short ({len(words)} words), cancelling"
    write(words)
|
from citc.aws import AwsNode
from citc.cloud import NodeState
from citc.slurm import SlurmNode
from citc.watchdog import crosscheck
def test_crosscheck_empty():
    """With no nodes on either side there is nothing to reconcile."""
    actions = list(crosscheck([], []))
    assert not actions
def test_crosscheck_one_match():
    """A node known to both slurm and the cloud needs no action."""
    slurm = [
        SlurmNode(name="foo-1", state="idle", state_flag=None, features={}, reason="")
    ]
    cloud = [
        AwsNode(name="foo-1", state=NodeState.RUNNING, ip="10.0.0.25", id="i-foobar")
    ]
    actions = list(crosscheck(slurm, cloud))
    assert not actions
def test_missing_node_down():
    """A node slurm reports 'down' that is absent from the cloud should
    be scheduled for resume."""
    node = SlurmNode(name="foo-1", state="down", state_flag=None, features={}, reason="")
    actions = crosscheck([node], [])
    assert node.resume in actions
def test_idle_node_off():
    """A powered-off idle node (state flag '~') is expected to be absent
    from the cloud, so no action results."""
    node = SlurmNode(name="foo-1", state="idle", state_flag="~", features={}, reason="")
    actions = list(crosscheck([node], []))
    assert not actions
|
"""Reader file to parse Seward Electric Association 15-minute
meter data provided by Chugach Electric.
"""
from io import StringIO
import csv
from .base_reader import BaseReader
class Reader(BaseReader):
    """Parses Seward Electric Association 15-minute meter CSV rows
    (provided by Chugach Electric) into (timestamp, sensor_id, kW)
    records."""
    def read_header(self, fobj, file_name):
        """Consume and return the single header line of the file."""
        # There is one header line
        return [fobj.readline()]
    def parse_line(self, lin):
        """Parse one CSV data line into a one-element list of
        (unix_timestamp, 'ses_<meter>', kW) tuples.

        Fields used: meter number, date, time, (unused), kW.  The
        timestamp is shifted to the middle of the 15-minute interval.
        """
        # Use the csv module to properly split the line into fields, accounting
        # for quoted strings.
        f = StringIO(lin)
        rdr = csv.reader(f)
        fields = next(rdr)
        meter_num, dt, tm, _, kw, *rest = fields
        meter_num = meter_num.strip().lower()
        # String versions of the date components.
        # The slicing implies `dt` is digits in M(M)DDYY order, month
        # possibly one digit -- TODO confirm with a sample file.
        yr = dt[-2:]
        da = dt[-4:-2]
        mo = '%02d' % int(dt[-6:-4])
        # String versions of the time components (`tm` looks like H(H)MM).
        # NOTE(review): `min` shadows the builtin of the same name.
        min = tm[-2:]
        hr = tm[-4:-2]
        # A time below 1:00 leaves an empty hour slice
        if hr == '':
            hr = '0'
        # hr may equal 24, which is invalid. Convert to 0 and add a day later
        if hr == '24':
            add_a_day = True
            hr = '0'
        else:
            add_a_day = False
        hr = '%02d' % int(hr)
        dt_tm_str = f'20{yr}-{mo}-{da} {hr}:{min}'
        ts = self.ts_from_date_str(dt_tm_str, '%Y-%m-%d %H:%M')
        if add_a_day:
            ts += 24*3600
        # put the timestamp in the middle of the 15 minute interval
        ts -= 7.5 * 60
        # Strip quotes and thousands separators before converting kW.
        kw = float(kw.replace('"', '').replace(',', ''))
        return [(ts, f'ses_{meter_num}', kw)]
|
class Solution(object):
    """Count all distinct solutions to the n-queens puzzle."""

    def totalNQueens(self, n):
        """
        :type n: int
        :rtype: int
        """
        return self.nQueensHelper([-1] * n, 0)

    def nQueensHelper(self, array, count):
        """Place a queen on row ``count`` and recurse.

        ``array[r]`` records the column chosen for row ``r``; returns the
        number of complete placements reachable from this partial board.
        """
        size = len(array)
        if count == size:
            # Every row holds a queen: one complete solution found.
            return 1
        total = 0
        for col in range(size):
            # A column is usable when no earlier row attacks it vertically
            # or along either diagonal.
            safe = all(
                col != array[row]
                and array[row] - row + count != col
                and array[row] + row - count != col
                for row in range(count)
            )
            if safe:
                array[count] = col
                total += self.nQueensHelper(array, count + 1)
        return total
# coding: utf-8
import pytest
from scraper.src.config.config_loader import ConfigLoader
from .abstract import config
from .mocked_init import MockedInit
class TestGetExtraFacets:
    """Tests for ConfigLoader.get_extra_facets().

    Extra facets come from the named regex groups (``(?P<name>...)``)
    declared in ``start_urls`` entries together with their ``variables``.
    """

    def test_extra_facets_should_be_empty_by_default(self):
        """A bare config declares no variables, hence no extra facets."""
        c = config()
        actual = ConfigLoader(c)
        assert actual.get_extra_facets() == []

    @pytest.mark.chromedriver
    @pytest.mark.usefixtures("chromedriver")
    def test_extra_facets_should_be_set_from_start_urls_variables_browser(self,
                                                                          monkeypatch):
        """One start_url with one variable yields exactly that facet name."""
        # Replace the real chrome webdriver so no browser is launched.
        monkeypatch.setattr("selenium.webdriver.chrome",
                            lambda x: MockedInit())
        c = config({
            "start_urls": [
                {
                    "url": "https://test.com/doc/(?P<type_of_content>.*?)/",
                    "variables": {
                        "type_of_content": ["book", "bundles", "reference",
                                            "components", "cookbook",
                                            "best_practices"]
                    }
                }
            ]
        })
        actual = ConfigLoader(c)
        assert actual.get_extra_facets() == ["type_of_content"]

    @pytest.mark.chromedriver
    @pytest.mark.usefixtures("chromedriver")
    def test_extra_facets_should_be_set_from_start_urls_variables_with_two_start_url_browser(
            self, monkeypatch):
        """The same variable used in two start_urls is reported only once."""
        monkeypatch.setattr("selenium.webdriver.chrome",
                            lambda x: MockedInit())
        c = config({
            "js-render": True,
            "start_urls": [
                {
                    "url": "https://test.com/doc/(?P<type_of_content>.*?)/",
                    "variables": {
                        "type_of_content": ["book", "bundles", "reference",
                                            "components", "cookbook",
                                            "best_practices"]
                    }
                },
                {
                    "url": "https://test.com/doc/(?P<type_of_content>.*?)/",
                    "variables": {
                        "type_of_content": ["test"]
                    }
                }
            ]
        })
        actual = ConfigLoader(c)
        assert actual.get_extra_facets() == ["type_of_content"]

    @pytest.mark.chromedriver
    @pytest.mark.usefixtures("chromedriver")
    def test_extra_facets_should_be_set_from_start_urls_variables_with_multiple_tags_browser(
            self, monkeypatch):
        """Distinct variables across start_urls each become a facet."""
        monkeypatch.setattr("selenium.webdriver.chrome",
                            lambda x: MockedInit())
        c = config({
            "start_urls": [
                {
                    "url": "https://test.com/doc/(?P<type_of_content>.*?)/(?P<version>.*?)",
                    "variables": {
                        "type_of_content": ["book", "bundles", "reference",
                                            "components", "cookbook",
                                            "best_practices"],
                        "version": ["1.0", "2.0"]
                    }
                },
                {
                    "url": "https://test.com/doc/(?P<type_of_content>.*?)/",
                    "variables": {
                        "type_of_content": ["test"]
                    }
                }
            ]
        })
        actual = ConfigLoader(c)
        extra_facets = actual.get_extra_facets()
        assert "version" in extra_facets
        assert "type_of_content" in extra_facets
import networkx as nx
import graph
def test_check_shortest_path():
    """Cross-check our shortest-path implementation against networkx."""
    # load data
    graph_data = graph.graph_loader('data.txt')
    # create graph
    nx_graph = graph.get_nx_graph(graph_data)
    my_graph = graph.get_my_graph(graph_data)
    # get shortest_path
    expected_path = nx.shortest_path(nx_graph, source='A', target='D')
    my_path = graph.my_shortest_path(my_graph, 'A', 'D')
    # NOTE(review): shortest paths need not be unique; if data.txt admits
    # ties this exact-equality check may be fragile — confirm the fixture.
    assert my_path == expected_path
|
from kazoo.client import KazooClient
from kazoo.exceptions import CancelledError
import gevent
from gevent import Greenlet
from consistent_hash import ConsistentHash
import logging
""" Partition Library
This library provides functionality to implement partition sharing between
cluster nodes
"""
class PartitionClient(object):
    """ Client Class for the Partition Library
    Example usage:
    ---------------------
    import libpartition
    from libpartition.libpartition import PartitionClient

    def own_change_cb(l):
        print("ownership change:" + str(l))

    c = PartitionClient("test", "s1", ["s1", "s2", "s3"], 32,
            own_change_cb, "zookeeper_s1")

    ##do some real work now"
    if (c.own_partition(1)):
        ...... do something with partition #1 .....
        .........
    ...
    c.update_cluster_list(["s1", "s2"])
    ...
    ----------------------
    You should not call any partition library routine from within the
    callback function

    Args:
        app_name(str): Name of the app for which partition cluster is used
        self_name(str): Name of the local cluster node (can be ip address)
        cluster_list(list): List of all the nodes in the cluster including
            local node
        max_partition(int): Partition space always go from 0..max_partition-1
        partition_update_cb: Callback function invoked when partition
            ownership list is updated.
        zk_server(str): <zookeeper server>:<zookeeper server port>
    """

    def __init__(
            self, app_name, self_name, cluster_list, max_partition,
            partition_update_cb, zk_server, logger = None):
        # Initialize local variables
        self._zk_server = zk_server
        self._cluster_list = set(cluster_list)
        self._max_partition = max_partition
        self._update_cb = partition_update_cb
        self._curr_part_ownership_list = []
        self._target_part_ownership_list = []
        self._con_hash = ConsistentHash(cluster_list)
        self._name = self_name

        # some sanity check
        if not(self._name in cluster_list):
            raise ValueError('cluster list is missing local server name')

        # initialize logging and other stuff
        if logger is None:
            logging.basicConfig()
            self._logger = logging
        else:
            self._logger = logger
        self._conn_state = None
        self._sandesh_connection_info_update(status='INIT', message='')

        # connect to zookeeper; retry forever, reporting DOWN while failing
        self._zk = KazooClient(zk_server)
        while True:
            try:
                self._zk.start()
                break
            except gevent.event.Timeout as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master election
            except Exception as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
        # Update connection info
        self._sandesh_connection_info_update(status='UP', message='')

        # Done connecting to ZooKeeper

        # create a lock array to contain locks for each partition
        self._part_locks = []
        for part in range(0, self._max_partition):
            lockpath = "/lockpath/"+ app_name + "/" + str(part)
            l = self._zk.Lock(lockpath, self._name)
            self._part_locks.append(l)

        # initialize partition # to lock acquire greenlet dictionary
        self._part_lock_task_dict = {}

        self._logger.error("initial servers:" + str(self._cluster_list))

        # update target partition ownership list
        for part in range(0, self._max_partition):
            if (self._con_hash.get_node(str(part)) == self._name):
                self._target_part_ownership_list.append(part)

        # update current ownership list
        self._acquire_partition_ownership()
    #end __init__

    def _sandesh_connection_info_update(self, status, message):
        """Report the ZooKeeper connection state to the sandesh framework
        and log transitions to/from DOWN."""
        from pysandesh.connection_info import ConnectionState
        from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
            ConnectionType
        from pysandesh.gen_py.sandesh.ttypes import SandeshLevel

        new_conn_state = getattr(ConnectionStatus, status)
        ConnectionState.update(conn_type = ConnectionType.ZOOKEEPER,
                name = 'Zookeeper', status = new_conn_state,
                message = message,
                server_addrs = self._zk_server.split(','))

        if (self._conn_state and self._conn_state != ConnectionStatus.DOWN and
                new_conn_state == ConnectionStatus.DOWN):
            msg = 'Connection to Zookeeper down: %s' %(message)
            self._logger.error(msg)
        if (self._conn_state and self._conn_state != new_conn_state and
                new_conn_state == ConnectionStatus.UP):
            msg = 'Connection to Zookeeper ESTABLISHED'
            self._logger.error(msg)

        self._conn_state = new_conn_state
    # end _sandesh_connection_info_update

    # following routine is the greenlet task function to acquire the lock
    # for a partition
    def _acquire_lock(self, part):
        """Greenlet body: poll until the ZooKeeper lock for ``part`` is
        acquired, then record ownership and invoke the callback."""
        # lock for the partition
        l = self._part_locks[part]
        # go in an infinite loop waiting to acquire the lock
        try:
            while True:
                ret = l.acquire(blocking=False)
                if ret == True:
                    self._logger.error("Acquired lock for:" + str(part))
                    self._curr_part_ownership_list.append(part)
                    self._update_cb(self._curr_part_ownership_list)
                    return True
                else:
                    gevent.sleep(1)
        except CancelledError:
            self._logger.error("Lock acquire cancelled for:" + str(part))
            return False
        except Exception as ex:
            # TODO: If we have a non-KazooException, the lock object
            #       may get stuck in the "cancelled" state
            self._logger.error("Lock acquire unexpected error!: " + str(ex))
            # This exception should get propogated to main thread
            raise SystemExit
            return False
    #end _acquire_lock

    # get rid of finished spawned tasks from datastructures
    def _cleanup_greenlets(self):
        # Iterate over a snapshot of the keys: deleting from the dict while
        # iterating its live .keys() view raises RuntimeError on Python 3.
        for part in list(self._part_lock_task_dict.keys()):
            if (self._part_lock_task_dict[part].ready()):
                del self._part_lock_task_dict[part]
    #end _cleanup_greenlets

    # following routine launches tasks to acquire partition locks
    def _acquire_partition_ownership(self):
        """Reconcile current ownership with the target list: spawn lock
        acquisition greenlets for wanted partitions and release unwanted
        ones, invoking the update callback when ownership changes."""
        # cleanup any finished greenlets
        self._cleanup_greenlets()

        # this variable will help us decide if we need to call callback
        updated_curr_ownership = False

        # list of partitions for which locks have to be released
        release_lock_list = []

        self._logger.info("known servers: %s" % self._con_hash.get_all_nodes())

        for part in range(0, self._max_partition):
            if (part in self._target_part_ownership_list):
                if (part in self._curr_part_ownership_list):
                    # do nothing, I already have ownership of this partition
                    self._logger.info("No need to acquire ownership of:" +
                            str(part))
                else:
                    # I need to acquire lock for this partition before I own
                    if (part in self._part_lock_task_dict.keys()):
                        try:
                            self._part_lock_task_dict[part].get(block=False)
                        except:
                            # Bare except is deliberate: a still-running
                            # greenlet raises gevent.Timeout, which derives
                            # from BaseException, not Exception.
                            # do nothing there is already a greenlet running to
                            # acquire the lock
                            self._logger.error("Already a greenlet running to"
                                    " acquire:" + str(part))
                            continue

                        # Greenlet died without getting ownership. Cleanup
                        self._logger.error("Cleanup stale greenlet running to"
                                " acquire:" + str(part))
                        del self._part_lock_task_dict[part]

                    self._logger.error("Starting greenlet running to"
                            " acquire:" + str(part))
                    # launch the greenlet to acquire the lock
                    g = Greenlet.spawn(self._acquire_lock, part)
                    self._part_lock_task_dict[part] = g
            else:
                # give up ownership of the partition

                # cancel any lock acquisition which is ongoing
                if (part in self._part_lock_task_dict.keys()):
                    try:
                        self._part_lock_task_dict[part].get(block=False)
                    except:
                        # Bare except (see above): catches gevent.Timeout for
                        # a greenlet that is still trying to acquire.
                        self._logger.error("canceling lock acquisition going on \
                                for:" + str(part))
                        # Cancelling the lock should result in killing the gevent
                        self._part_locks[part].cancel()
                        self._part_lock_task_dict[part].get(block=True)

                    del self._part_lock_task_dict[part]

                if (part in self._curr_part_ownership_list):
                    release_lock_list.append(part)
                    self._curr_part_ownership_list.remove(part)
                    updated_curr_ownership = True
                    self._logger.error("giving up ownership of:" + str(part))

        if (updated_curr_ownership is True):
            # current partition membership was updated call the callback
            self._update_cb(self._curr_part_ownership_list)

        if (len(release_lock_list) != 0):
            # release locks which were acquired
            for part in release_lock_list:
                self._logger.error("release the lock which was acquired:" + \
                        str(part))
                try:
                    self._part_locks[part].release()
                    self._logger.error("fully gave up ownership of:" + str(part))
                except:
                    # Best-effort release; the lock may already be gone.
                    pass
    #end _acquire_partition_ownership

    def update_cluster_list(self, cluster_list):
        """ Updates the cluster node list
        Args:
            cluster_list(list): New list of names of the nodes in
                the cluster
        Returns:
            None
        """
        # some sanity check
        if not(self._name in cluster_list):
            raise ValueError('cluster list is missing local server name')

        new_cluster_list = set(cluster_list)
        new_servers = list(new_cluster_list.difference(
            self._cluster_list))
        deleted_servers = list(set(self._cluster_list).difference(
            new_cluster_list))
        self._cluster_list = set(cluster_list)

        # update the hash structure
        if new_servers:
            self._logger.error("new servers:" + str(new_servers))
            self._con_hash.add_nodes(new_servers)
        if deleted_servers:
            self._logger.error("deleted servers:" + str(deleted_servers))
            self._con_hash.del_nodes(deleted_servers)

        # update target partition ownership list
        self._target_part_ownership_list = []
        for part in range(0, self._max_partition):
            if (self._con_hash.get_node(str(part)) == self._name):
                if not (part in self._target_part_ownership_list):
                    self._target_part_ownership_list.append(part)

        # update current ownership list
        self._acquire_partition_ownership()
    #end update_cluster_list

    def own_partition(self, part_no):
        """ Returns ownership information of a partition
        Args:
            part_no(int) : Partition no
        Returns:
            True if partition is owned by the local node
            False if partition is not owned by the local node
        """
        return part_no in self._curr_part_ownership_list
    #end own_partition

    def close(self):
        """ Closes any connections and frees up any data structures
        Args:
        Returns:
            None
        """
        # clean up greenlets (snapshot the keys for Python 3 safety)
        for part in list(self._part_lock_task_dict.keys()):
            try:
                self._logger.error("libpartition greenlet cleanup %s" % str(part))
                self._part_lock_task_dict[part].kill()
            except:
                # Best-effort: a greenlet may already be dead.
                pass

        self._logger.error("Stopping libpartition")
        # close zookeeper
        try:
            self._zk.stop()
        except:
            self._logger.error("Stopping libpartition failed")
        else:
            self._logger.error("Stopping libpartition successful")

        self._logger.error("Closing libpartition")
        try:
            self._zk.close()
        except:
            self._logger.error("Closing libpartition failed")
        else:
            self._logger.error("Closing libpartition successful")
    #end close
|
"""
A square triple (a,b,c) is a triple where a, b, and c are integers and a2 + b2 = c2.
Given an integer n, return the number of square triples such that 1 <= a, b, c <= n.
Source - https://leetcode.com/problems/count-square-sum-triples/
"""
"""
Time Complexity - O(n^2)
Space Complexity - O(1)
"""
class Solution:
    def countTriples(self, n: int) -> int:
        """Count ordered pairs (a, b) with a^2 + b^2 = c^2 and 1 <= a, b, c <= n.

        Uses exact integer arithmetic (math.isqrt) instead of a floating-point
        square root, so the perfect-square check cannot suffer precision
        errors for large n.

        Time Complexity - O(n^2)
        Space Complexity - O(1)
        """
        from math import isqrt
        # Counter to hold result
        res = 0
        # Iterate over integers in range [1,n] - fixing value for 'a'
        for a in range(1, n + 1):
            # Iterate over integers in range [1,n] - fixing value for 'b'
            for b in range(1, n + 1):
                # Candidate c^2 and its exact integer square root
                square_sum = a * a + b * b
                c = isqrt(square_sum)
                # Valid only if c is an exact root and does not exceed n
                if c <= n and c * c == square_sum:
                    res += 1
        # Return result
        return res
|
from syft.spdz.interface import (
base_interface,
distributed_interface,
grid_client_interface,
grid_worker_interface,
)
# Smoke check: stringify each interface module so import errors surface here.
s = "".join(
    str(module)
    for module in (
        base_interface,
        distributed_interface,
        grid_client_interface,
        grid_worker_interface,
    )
)
|
from pypise import Pypise, TestCase, TestRunner
from parameterized import parameterized
class BaiduTest(TestCase):
    """Baidu search test case"""

    @classmethod
    def setUpClass(cls):
        """ Setting browser driver, Using chrome by default."""
        cls.driver = Pypise("chrome")
        cls.timeout = 15  # You can customize timeout time

    """
    A simple test
    """
    def test_case(self):
        """ baidu search key : pypise """
        # Open the search-settings dialog and change results-per-page to 20,
        # then confirm via the browser alert.
        self.open("https://www.baidu.com/")
        self.move_to_element("link_text=>设置")
        self.click("link_text=>搜索设置")
        self.select("#nr", '20')
        self.click("class=>prefpanelgo")
        self.sleep(2)
        self.assertAlert("已经记录下您的使用偏好")
        self.accept_alert()

    """
    used parameterized test
    """
    @parameterized.expand([
        (1, 'pypise'),
        (2, 'selenium'),
        (3, 'unittest'),
    ])
    def test_baidu(self, name, search_key):
        ''' baidu search key : pypise '''
        # Type the search key and assert the result page title contains it.
        self.open("https://www.baidu.com")
        self.clear("id=>kw")
        self.type("id=>kw", search_key)
        self.click("css=>#su")
        self.assertTitle(search_key)
if __name__ == '__main__':
    # TestRunner(test_dir, project_title, environment_description);
    # debug() runs without generating a report (see note string below).
    runner = TestRunner('./', '百度测试用例', '测试环境:Firefox')
    runner.debug()
'''
说明:
'./' : 指定测试目录。
'百度测试用例' : 指定测试项目标题。
'测试环境:Chrome' : 指定测试环境描述。
debug() # debug模式不生成测试报告
run() # run模式生成测试报告
'''
|
import urllib.request
import json
def _get_and_parse_json(url):
    """GET ``url`` with JSON headers and return the decoded JSON body.

    Args:
        url (str): endpoint to query.

    Returns:
        The parsed JSON value (dict/list/scalar).
    """
    req = urllib.request.Request(url,
                                 headers={'Content-type': 'application/json',
                                          'Accept': 'application/json'})
    # Context manager guarantees the HTTP response is closed; the original
    # never closed the connection object.
    with urllib.request.urlopen(req) as f:
        parsed = json.loads(f.read())
    # Example response observed from a Spark streaming endpoint:
    # {'startTime': '2018-03-02T06:26:10.544GMT', 'batchDuration': 1000, 'numReceivers': 1, 'numActiveReceivers': 1,
    #  'numInactiveReceivers': 0, 'numTotalCompletedBatches': 29, 'numRetainedCompletedBatches': 29,
    #  'numActiveBatches': 0, 'numProcessedRecords': 28, 'numReceivedRecords': 28, 'avgInputRate': 0.9655172413793104,
    #  'avgSchedulingDelay': 5, 'avgProcessingTime': 122, 'avgTotalDelay': 127}
    return parsed
# {
# "cpu_pause_ms": 20,
# "message_bytes": 5000000,
# "period_sec": 0.03
# }
def set_new_params(params):
    """POST ``params`` as JSON to the local control endpoint and echo the reply.

    Args:
        params (dict): parameter payload, e.g.
            {"cpu_pause_ms": 20, "message_bytes": 5000000, "period_sec": 0.03}

    Returns:
        None
    """
    params_as_json = json.dumps(params).encode('utf8')
    print(params_as_json)
    req = urllib.request.Request('http://localhost:8081', data=params_as_json,
                                 headers={'content-type': 'application/json'})
    # Context manager closes the response even if decoding fails; the
    # original leaked the response object.
    with urllib.request.urlopen(req) as response:
        print(response.read().decode('utf8'))
if __name__ == '__main__':
    # Example payload pushed to the local control endpoint when run directly.
    p = {
        "cpu_pause_ms": 20,
        "message_bytes": 5000000,
        "period_sec": 0.03
    }
    set_new_params(p)
|
from django.conf.urls import url
from . import views
from django.conf import settings
# URL routes for the app.  All regex patterns now use raw strings for
# consistency (the first two originally omitted the r-prefix; the string
# values are unchanged since they contain no escape sequences).
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^create/profile$', views.update_profile, name='profile'),
    url(r'^post/create', views.post, name='posthood'),
    url(r'^business/create', views.business, name='postbusiness'),
    url(r'^business/view', views.view_business, name='viewBusiness'),
    url(r'^search/', views.search_bizna, name='search_bizna'),
]
# Variable declarations
name = 'NerdApe'
twitter_id = '12345'
faceBook = 1234
# Integer, string and floating-point types
# i.e. the int(), str() and float() types
numbers = 12345
strings = 'NerdELu'
strings_02 = "KustomApe"
recommend = 'single'
special_strings = "I don't think that is right thing to do."
syosu = 1.23
syosu_02 = 4.56
# Function call (built-in print)
print(name)
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import inspect
import textwrap
from pyiron.base.job.generic import GenericJob
"""
The GenericMaster is the template class for all meta jobs
"""
__author__ = "Jan Janssen"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "janssen@mpie.de"
__status__ = "production"
__date__ = "Sep 1, 2017"
class GenericMaster(GenericJob):
    """
    The GenericMaster is the template class for all meta jobs - meaning all jobs which contain multiple other jobs. It
    defines the shared functionality of the different kind of job series.

    Args:
        project (ProjectHDFio): ProjectHDFio instance which points to the HDF5 file the job is stored in
        job_name (str): name of the job, which has to be unique within the project

    Attributes:

        .. attribute:: job_name
            name of the job, which has to be unique within the project

        .. attribute:: status
            execution status of the job, can be one of the following [initialized, appended, created, submitted,
            running, aborted, collect, suspended, refresh,
            busy, finished]

        .. attribute:: job_id
            unique id to identify the job in the pyiron database

        .. attribute:: parent_id
            job id of the predecessor job - the job which was executed before the current one in the current job series

        .. attribute:: master_id
            job id of the master job - a meta job which groups a series of jobs, which are executed either in parallel
            or in serial.

        .. attribute:: child_ids
            list of child job ids - only meta jobs have child jobs - jobs which list the meta job as their master

        .. attribute:: project
            Project instance the jobs is located in

        .. attribute:: project_hdf5
            ProjectHDFio instance which points to the HDF5 file the job is stored in

        .. attribute:: job_info_str
            short string to describe the job by it is job_name and job ID - mainly used for logging

        .. attribute:: working_directory
            working directory of the job is executed in - outside the HDF5 file

        .. attribute:: path
            path to the job as a combination of absolute file system path and path within the HDF5 file.

        .. attribute:: version
            Version of the hamiltonian, which is also the version of the executable unless a custom executable is used.

        .. attribute:: executable
            Executable used to run the job - usually the path to an external executable.

        .. attribute:: library_activated
            For job types which offer a Python library pyiron can use the python library instead of an external
            executable.

        .. attribute:: server
            Server object to handle the execution environment for the job.

        .. attribute:: queue_id
            the ID returned from the queuing system - it is most likely not the same as the job ID.

        .. attribute:: logger
            logger object to monitor the external execution and internal pyiron warnings.

        .. attribute:: restart_file_list
            list of files which are used to restart the calculation from these files.

        .. attribute:: job_type
            Job type object with all the available job types: ['ExampleJob', 'SerialMaster', 'ParallelMaster',
            'ScriptJob', 'ListMaster']

        .. attribute:: child_names
            Dictionary matching the child ID to the child job name.
    """

    def __init__(self, project, job_name):
        super(GenericMaster, self).__init__(project, job_name=job_name)
        # Ordered list of child job names appended to this master
        self._job_name_lst = []
        # Cache of currently loaded child job objects, keyed by job name
        self._job_object_dict = {}
        # Optional external function that derives the list of child IDs
        self._child_id_func = None
        # Source code of that function (used when the source file is gone)
        self._child_id_func_str = None

    @property
    def child_names(self):
        """
        Dictionary matching the child ID to the child job name

        Returns:
            dict: {child_id: child job name }
        """
        child_dict = {}
        for child_id in self.child_ids:
            child_dict[child_id] = self.project.db.get_item_by_id(child_id)["job"]
        return child_dict

    @property
    def child_ids(self):
        """
        list of child job ids - only meta jobs have child jobs - jobs which list the meta job as their master

        Returns:
            list: list of child job ids
        """
        # A user-supplied child-ID function (set_child_id_func) takes
        # precedence over the database lookup in the parent class.
        if self._child_id_func:
            return self._child_id_func(self)
        else:
            return super(GenericMaster, self).child_ids

    @property
    def job_object_dict(self):
        """
        internal cache of currently loaded jobs

        Returns:
            dict: Dictionary of currently loaded jobs
        """
        return self._job_object_dict

    def first_child_name(self):
        """
        Get the name of the first child job

        Returns:
            str: name of the first child job
        """
        return self.project.db.get_item_by_id(self.child_ids[0])["job"]

    def validate_ready_to_run(self):
        """
        Validate that the calculation is ready to be executed. By default no generic checks are performed, but one could
        check that the input information is complete or validate the consistency of the input at this point.
        """
        pass

    def append(self, job):
        """
        Append a job to the GenericMaster - just like you would append an element to a list.

        Args:
            job (GenericJob): job to append
        """
        # The master needs at least as many cores as its largest child.
        if job.server.cores >= self.server.cores:
            self.server.cores = job.server.cores
        if job.job_name not in self._job_name_lst:
            self._job_name_lst.append(job.job_name)
            # Re-home the child's HDF5 storage inside this master's file.
            self._child_job_update_hdf(parent_job=self, child_job=job)

    def pop(self, i=-1):
        """
        Pop a job from the GenericMaster - just like you would pop an element from a list

        Args:
            i (int): position of the job. (Default is last element, -1.)

        Returns:
            GenericJob: job
        """
        job_name_to_return = self._job_name_lst[i]
        job_to_return = self._load_all_child_jobs(
            self._load_job_from_cache(job_name_to_return)
        )
        del self._job_name_lst[i]
        with self.project_hdf5.open("input") as hdf5_input:
            hdf5_input["job_list"] = self._job_name_lst
        # Detach the job's HDF5 storage from the master file and give it
        # its own top-level group again.
        job_to_return.project_hdf5.remove_group()
        job_to_return.project_hdf5 = self.project_hdf5.__class__(
            self.project, job_to_return.job_name, h5_path="/" + job_to_return.job_name
        )
        if isinstance(job_to_return, GenericMaster):
            for sub_job in job_to_return._job_object_dict.values():
                self._child_job_update_hdf(parent_job=job_to_return, child_job=sub_job)
        job_to_return.status.initialized = True
        return job_to_return

    def move_to(self, project):
        """
        Move the content of the job including the HDF5 file to a new location

        Args:
            project (ProjectHDFio): project to move the job to

        Returns:
            JobCore: JobCore object pointing to the new location.
        """
        # Move all children first, then the master itself.
        if self._job_id:
            for child_id in self.child_ids:
                child = self.project.load(child_id)
                child.move_to(project.open(self.job_name + "_hdf5"))
        super(GenericMaster, self).move_to(project)

    def copy_to(
        self, project=None, new_job_name=None, input_only=False, new_database_entry=True
    ):
        """
        Copy the content of the job including the HDF5 file to a new location

        Args:
            project (ProjectHDFio): project to copy the job to
            new_job_name (str): to duplicate the job within the same porject it is necessary to modify the job name
                - optional
            input_only (bool): [True/False] to copy only the input - default False
            new_database_entry (bool): [True/False] to create a new database entry - default True

        Returns:
            GenericJob: GenericJob object pointing to the new location.
        """
        new_generic_job = super(GenericMaster, self).copy_to(
            project=project,
            new_job_name=new_job_name,
            input_only=input_only,
            new_database_entry=new_database_entry,
        )
        # Copy the children as well and re-point their parent/master IDs at
        # the freshly copied master.
        if new_generic_job.job_id and new_database_entry and self._job_id:
            for child_id in self.child_ids:
                child = self.project.load(child_id)
                new_child = child.copy_to(
                    project=project.open(self.job_name + "_hdf5"),
                    new_database_entry=new_database_entry,
                )
                if new_database_entry and child.parent_id:
                    new_child.parent_id = new_generic_job.job_id
                if new_database_entry and child.master_id:
                    new_child.master_id = new_generic_job.job_id
        return new_generic_job

    def to_hdf(self, hdf=None, group_name=None):
        """
        Store the GenericMaster in an HDF5 file

        Args:
            hdf (ProjectHDFio): HDF5 group object - optional
            group_name (str): HDF5 subgroup name - optional
        """
        super(GenericMaster, self).to_hdf(hdf=hdf, group_name=group_name)
        with self.project_hdf5.open("input") as hdf5_input:
            hdf5_input["job_list"] = self._job_name_lst
            self._to_hdf_child_function(hdf=hdf5_input)
        # Persist every cached child job alongside the master.
        for job in self._job_object_dict.values():
            job.to_hdf()

    def from_hdf(self, hdf=None, group_name=None):
        """
        Restore the GenericMaster from an HDF5 file

        Args:
            hdf (ProjectHDFio): HDF5 group object - optional
            group_name (str): HDF5 subgroup name - optional
        """
        super(GenericMaster, self).from_hdf(hdf=hdf, group_name=group_name)
        with self.project_hdf5.open("input") as hdf5_input:
            job_list_tmp = hdf5_input["job_list"]
            self._from_hdf_child_function(hdf=hdf5_input)
        self._job_name_lst = job_list_tmp

    def set_child_id_func(self, child_id_func):
        """
        Add an external function to derive a list of child IDs - experimental feature

        Args:
            child_id_func (Function): Python function which returns the list of child IDs
        """
        self._child_id_func = child_id_func
        self.save()
        # Marks the master as finished once a child-ID function is attached.
        self.status.finished = True

    def get_child_cores(self):
        """
        Calculate the currently active number of cores, by summarizing all childs which are neither finished nor
        aborted.

        Returns:
            (int): number of cores used
        """
        # The "computer" database field encodes the core count after '#'.
        return sum(
            [
                int(db_entry["computer"].split("#")[1])
                for db_entry in self.project.db.get_items_dict(
                    {"masterid": self.job_id}
                )
                if db_entry["status"] not in ["finished", "aborted"]
            ]
        )

    def write_input(self):
        """
        Write the input files for the external executable. This method has to be implemented in the individual
        hamiltonians.
        """
        raise NotImplementedError(
            "write procedure must be defined for derived Hamilton!"
        )

    def collect_output(self):
        """
        Collect the output files of the external executable and store the information in the HDF5 file. This method has
        to be implemented in the individual hamiltonians.
        """
        raise NotImplementedError(
            "read procedure must be defined for derived Hamilton!"
        )

    def run_if_interactive(self):
        """
        For jobs which executables are available as Python library, those can also be executed with a library call
        instead of calling an external executable. This is usually faster than a single core python job.
        """
        raise NotImplementedError(
            "This function needs to be implemented in the specific class."
        )

    def interactive_close(self):
        """
        interactive close is not implemtned for MetaJobs
        """
        pass

    def interactive_fetch(self):
        """
        interactive fetch is not implemtned for MetaJobs
        """
        pass

    def interactive_flush(self, path="generic", include_last_step=True):
        """
        interactive flush is not implemtned for MetaJobs
        """
        pass

    def run_if_interactive_non_modal(self):
        """
        Run if interactive non modal is not implemented for MetaJobs
        """
        pass

    def __len__(self):
        """
        Length of the GenericMaster equal the number of childs appended.

        Returns:
            int: length of the GenericMaster
        """
        return len(self._job_name_lst)

    def __getitem__(self, item):
        """
        Get/ read data from the GenericMaster

        Args:
            item (str, slice): path to the data or key of the data object

        Returns:
            dict, list, float, int: data or data object
        """
        child_id_lst = self.child_ids
        child_name_lst = [
            self.project.db.get_item_by_id(child_id)["job"]
            for child_id in self.child_ids
        ]
        # Integer access is resolved to the child name at that position.
        if isinstance(item, int):
            item = self._job_name_lst[item]
        return self._get_item_when_str(
            item=item, child_id_lst=child_id_lst, child_name_lst=child_name_lst
        )

    def __getattr__(self, item):
        """
        CHeck if a job with the specific name exists

        Args:
            item (str): name of the job

        Returns:
        """
        # Fall back to item access so child jobs are reachable as attributes;
        # raises AttributeError when no child of that name exists.
        item_from_get_item = self.__getitem__(item=item)
        if item_from_get_item is not None:
            return item_from_get_item
        else:
            raise AttributeError

    def _load_all_child_jobs(self, job_to_load):
        """
        Helper function to load all child jobs to memory - like it was done in the previous implementation

        Args:
            job_to_load (GenericJob): job to be reloaded

        Returns:
            GenericJob: job to be reloaded - including all the child jobs and their child jobs
        """
        # Recurse into nested masters so the whole job tree is in memory.
        if isinstance(job_to_load, GenericMaster):
            for sub_job_name in job_to_load._job_name_lst:
                job_to_load._job_object_dict[sub_job_name] = self._load_all_child_jobs(
                    job_to_load._load_job_from_cache(sub_job_name)
                )
        return job_to_load

    def _load_job_from_cache(self, job_name):
        """
        Helper funcction to load a job either from the _job_object_dict or from the HDF5 file

        Args:
            job_name (str): name of the job

        Returns:
            GenericJob: the reloaded job
        """
        if job_name in self._job_object_dict.keys():
            return self._job_object_dict[job_name]
        else:
            # Cache miss: reconstruct the job object from its HDF5 group.
            ham_obj = self.project_hdf5.create_object(
                class_name=self._hdf5[job_name + "/TYPE"],
                project=self._hdf5,
                job_name=job_name,
            )
            ham_obj.from_hdf()
            return ham_obj

    def _to_hdf_child_function(self, hdf):
        """
        Helper function to store the child function in HDF5

        Args:
            hdf: HDF5 file object
        """
        hdf["job_list"] = self._job_name_lst
        if self._child_id_func is not None:
            try:
                # Prefer the live source; fall back to the stored string when
                # the defining file is no longer available.
                hdf["child_id_func"] = inspect.getsource(self._child_id_func)
            except IOError:
                hdf["child_id_func"] = self._child_id_func_str
        else:
            hdf["child_id_func"] = "None"

    def _from_hdf_child_function(self, hdf):
        """
        Helper function to load the child function from HDF5

        Args:
            hdf: HDF5 file object
        """
        try:
            child_id_func_str = hdf["child_id_func"]
        except ValueError:
            child_id_func_str = "None"
        if child_id_func_str == "None":
            self._child_id_func = None
        else:
            self._child_id_func_str = child_id_func_str
            # Rebuild the callable from its stored source via the
            # module-level helper get_function_from_string.
            self._child_id_func = get_function_from_string(child_id_func_str)

    def _get_item_when_str(self, item, child_id_lst, child_name_lst):
        """
        Helper function for __get_item__ when item is type string

        Args:
            item (str):
            child_id_lst (list): a list containing all child job ids
            child_name_lst (list): a list containing the names of all child jobs

        Returns:
            anything
        """
        # Items use a path syntax "child_name/sub/path"; the first segment
        # selects the child, the rest is forwarded to that child.
        name_lst = item.split("/")
        item_obj = name_lst[0]
        if item_obj in child_name_lst:
            child_id = child_id_lst[child_name_lst.index(item_obj)]
            if len(name_lst) > 1:
                return self.project.inspect(child_id)["/".join(name_lst[1:])]
            else:
                return self.project.load(child_id, convert_to_object=True)
        elif item_obj in self._job_name_lst:
            child = self._load_job_from_cache(job_name=item_obj)
            if len(name_lst) == 1:
                return child
            else:
                return child["/".join(name_lst[1:])]
        else:
            return super(GenericMaster, self).__getitem__(item)

    def _child_job_update_hdf(self, parent_job, child_job):
        """
        Re-home a child job's HDF5 storage inside the parent's file.

        Args:
            parent_job: master job providing the HDF5 file
            child_job: job whose storage is nested under the parent
        """
        child_job.project_hdf5.file_name = parent_job.project_hdf5.file_name
        child_job.project_hdf5.h5_path = (
            parent_job.project_hdf5.h5_path + "/" + child_job.job_name
        )
        # Recurse so grandchildren also point into the same file.
        if isinstance(child_job, GenericMaster):
            for sub_job_name in child_job._job_name_lst:
                self._child_job_update_hdf(
                    parent_job=child_job,
                    child_job=child_job._load_job_from_cache(sub_job_name),
                )
        parent_job.job_object_dict[child_job.job_name] = child_job

    def _executable_activate_mpi(self):
        """
        Internal helper function to switch the executable to MPI mode
        """
        pass

    def run_if_refresh(self):
        """
        Internal helper function the run if refresh function is called when the job status is 'refresh'. If the job was
        suspended previously, the job is going to be started again, to be continued.
        """
        raise NotImplementedError(
            "Refresh is not supported for this job type for job " + str(self.job_id)
        )

    def _run_if_busy(self):
        """
        Run if busy is not implemented for MetaJobs
        """
        pass
def get_function_from_string(function_str):
    """Convert a string of source code to a function object.

    Args:
        function_str (str): source of a single ``def`` statement

    Returns:
        function: the function defined by ``function_str``

    Note:
        ``exec`` runs arbitrary code -- only call this on trusted input.
    """
    function_dedent_str = textwrap.dedent(function_str)
    # BUG FIX: execute in an explicit namespace. Relying on exec() mutating
    # the enclosing function's locals() (and eval() seeing the result) is
    # undefined behaviour in Python 3.
    namespace = {}
    exec(function_dedent_str, namespace)
    # The function name is whatever follows "def " up to the open paren.
    function_name = function_dedent_str.split("(")[0][4:].strip()
    return namespace[function_name]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 25 11:25:39 2021
@author: alann
"""
import requests
from datetime import datetime
# NOTE(review): secret embedded in source -- move to an environment variable.
apiKey = '65837ca6772c3e676cacc80f5428'
def filterByCountry(partners) -> dict:
    """Group partner records by their 'country' field.

    This can be done on the fly but I felt like it would be too much nesting.

    Args:
        partners: iterable of partner dicts, each with a 'country' key

    Returns:
        dict: country name -> list of partner dicts for that country
    """
    # FIX: the annotation used to be '-> dict()', which evaluates an
    # (empty) dict instance instead of naming the type.
    countries = dict()
    for partner in partners:
        countries.setdefault(partner['country'], []).append(partner)
    return countries
def checkDatesForCountry(country):
    """Find the start date with the most partners free on two consecutive days.

    Args:
        country: list of partner dicts (each with 'availableDates', 'email')
            for a single country

    Returns:
        tuple: (best start-date string or None, emails available that date)

    NOTE(review): ``mostAvailableDateEmails`` starts as a set but is later
    reassigned to a list aliased into ``availabilityDates`` -- later appends
    to that date mutate it too. Callers should not rely on the container
    type. TODO confirm intended.
    """
    # date -> emails of partners free on that date AND the following day
    availabilityDates = dict()
    mostAvailableDate = None
    mostAvailableDateEmails = set()
    for partner in country:
        """
        That pesky edge case where the input was simpy a 'U'
        More defensive programming could be utilised in case the nested objects are malformed .
        Especially when it comes to formatting the dates down below
        """
        if isinstance(partner, dict):
            for idx,date in enumerate(partner['availableDates']):
                """
                Assuming dates are sorted, I check if the next day is at most 1 day away,
                otherwise that partner is not available for the consecutive dates
                """
                if idx+1 < len(partner['availableDates']):
                    nextDate = datetime.strptime(partner['availableDates'][idx+1], "%Y-%m-%d").toordinal()
                    currDate = datetime.strptime(date, "%Y-%m-%d").toordinal()
                    if (nextDate - currDate) == 1:
                        availabilityDates.setdefault(date,[]).append(partner['email'])
                        """
                        Keep track of the most common date and the people available for that day,
                        if the lengths are equal, the earliest date is considered based on ordinal values
                        """
                        # On the first ever hit the elif branch runs (1 > 0),
                        # so mostAvailableDate is non-None before the strptime
                        # in the equal-length branch can see it.
                        if(len(availabilityDates[date]) == len(mostAvailableDateEmails)):
                            if currDate < datetime.strptime(mostAvailableDate, "%Y-%m-%d").toordinal():
                                mostAvailableDate = date
                                mostAvailableDateEmails = availabilityDates[date]
                        elif len(availabilityDates[date]) > len(mostAvailableDateEmails):
                            mostAvailableDate = date
                            mostAvailableDateEmails = availabilityDates[date]
    return mostAvailableDate, mostAvailableDateEmails
def constructResponse(countries):
    """Build the API response body from the grouped partner data.

    Args:
        countries (dict): country name -> list of partner dicts

    Returns:
        dict: {'countries': [{attendeeCount, attendees, name, startDate}, ...]}
    """
    responseObject = {}
    countryList = []
    for country in countries:
        date, emails = checkDatesForCountry(countries[country])
        countryResponse = {
            'attendeeCount': len(emails),
            # BUG FIX: when no date is found, emails is a set, which is not
            # JSON-serializable for requests.post(json=...); force a list.
            'attendees': list(emails),
            'name': country,
            # NOTE(review): the API may expect JSON null (None) rather than
            # the string 'null' -- confirm against the endpoint spec.
            'startDate': date if date is not None else 'null',
        }
        countryList.append(countryResponse)
    responseObject['countries'] = countryList
    return responseObject
# Fetch the dataset, compute the invitation plan per country, post it back.
listOfPartners = requests.get('https://candidate.hubteam.com/candidateTest/v3/problem/dataset?userKey='+apiKey);
countries = filterByCountry(listOfPartners.json()['partners'])
payload = constructResponse(countries)
headers={'Content-type':'application/json', 'Accept':'application/json'}
# NOTE(review): neither request checks the HTTP status; consider
# raise_for_status() so a failed GET does not surface as a .json() error.
res = requests.post('https://candidate.hubteam.com/candidateTest/v3/problem/result?userKey='+apiKey,json = payload, headers = headers)
print(res.content)
|
"""Shows plugins that are loaded"""
import colors
import common
import logger
import plugin_api
_logger = logger.LOGGER
class Plugin(plugin_api.LocalPlugin):
    """Plugin to show (and reload) loaded plugins."""

    @property
    def enabled(self):
        """ALWAYS ENABLED!"""
        return True

    async def on_message(self, target, by, message):
        """Handle the '.plugins list' and '.plugins reload' commands.

        Args:
            target: channel/user the reply is sent to
            by: nick of the user issuing the command
            message: raw message text
        """
        if message == '.plugins list':
            # Enabled plugins are shown green, disabled ones red.
            plugin_list = []
            for name, plugin in self.client.plugins.items():
                if plugin.enabled:
                    plugin_list.append(
                        colors.colorize(name, fg=colors.GREEN)
                    )
                else:
                    plugin_list.append(
                        colors.colorize(name, fg=colors.RED)
                    )
            await self.client.message(
                target,
                ', '.join(sorted(plugin_list))
            )
        if message == '.plugins reload':
            if not await common.is_user_admin_whois(self.client, by):
                # BUG FIX: the %s placeholder had no argument, so the log
                # line printed the literal format string instead of the nick.
                _logger.info('%s is not an admin, cant reload plugins', by)
                return
            self.client.plugins = common.load_py_plugins(
                self.client.chatnet,
                reload=True
            )
            await self.client.message(
                target,
                f'🔌 {colors.BOLD}R E L O A D E D{colors.BOLD} 🔌'
            )

    def help_msg(self):
        """Return the per-command help strings for this plugin."""
        return {
            'list': 'use ".plugins list" to list loaded plugins',
            'reload': 'use ".plugins reload" to reload all plugins'
        }
|
# Text to speech toolkits
import speech_recognition as sr
import pyttsx3
from pydub import AudioSegment
from pydub.utils import make_chunks
import wave
import pyaudio
# Generator Natural Langugae Tool Kit
import nltk
from nltk.data import load
from nltk import CFG
from nltk.grammar import is_nonterminal
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.corpus import stopwords
# Other Toolkits
import random
import sys # print logging save to file - pip install os-sys
import re
from Arduino import Arduino
import time
# Global TTS variables
listener = sr.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty('voices')
# NOTE(review): assumes at least two voices are installed; which voice sits
# at index 1 is platform dependent -- confirm on the target RPi image.
engine.setProperty('voice', voices[1].id)
poetry_voice_rate = 120  # slower engine rate used while reading poems
normal_voice_rate = 160  # default engine rate for normal speech
# Arduino Initialise
red_led_pin = 5  # recording
white_led_pin = 6  # stored
choice_servo_pin = 3  # source text choice - 40 recording, 80 bible, 130 poe, 180 sh
type_servo_pin = 4  # poetry types - 20 haiku, 70 short, 110 medium, 160 long
button_pin = 2  # stop recording
baud = '115200'
port = '/dev/ttyACM0'  # '/dev/ttyACM0' - on RPI or 'COM3' - on windows
board = Arduino(baud, port=port)
board.pinMode(red_led_pin, "OUTPUT")  # REC
board.pinMode(white_led_pin, "OUTPUT")  # Store
board.Servos.attach(choice_servo_pin)  # servo 1
board.Servos.attach(type_servo_pin)  # servo 2
stop_button = board.analogRead(button_pin)  # stop button analog 0 or 1000
# ----------------------------------------------- Generator Setup ----------------------------------------------
class Text:
    """POS-indexed view of a source text used by the poem generator."""

    def __init__(self, raw_text):
        """Tokenise *raw_text* and build POS buckets plus adjacency maps.

        Args:
            raw_text (str): the raw source text
        """
        self.text_array = nltk.word_tokenize(raw_text)
        # POS tag -> lowercased words of the text carrying that tag
        self.POS_buckets = {}
        tagged_text_array = nltk.pos_tag(self.text_array)
        self.tags = load('help/tagsets/upenn_tagset.pickle')
        for tag in self.tags:
            self.POS_buckets[tag] = []
        # FIX: loop variable renamed -- it used to shadow the builtin 'tuple'.
        # NOTE(review): assumes every tag emitted by pos_tag appears in the
        # upenn tagset, otherwise this raises KeyError -- TODO confirm.
        for word, tag in tagged_text_array:
            self.POS_buckets[tag].append(word.lower())
        # word -> words seen immediately before / after it in the text
        self.before = {}
        self.after = {}
        for word in self.text_array:
            self.before[word] = []
            self.after[word] = []
        for i in range(len(self.text_array)):  # range goes to one less than given value
            if i > 0:
                self.before[self.text_array[i]].append(self.text_array[i - 1])
            if i < len(self.text_array) - 1:
                self.after[self.text_array[i]].append(self.text_array[i + 1])

    def get_collocations(self):
        """Return up to 40 two-word collocations ranked by likelihood ratio."""
        ignored_words = stopwords.words('english')
        finder = BigramCollocationFinder.from_words(self.text_array, 2)
        finder.apply_freq_filter(3)
        finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
        bigram_measures = BigramAssocMeasures()
        return finder.nbest(bigram_measures.likelihood_ratio, 40)
class Grammar:
    """Context-free grammar supplying POS-tag line templates for poems."""

    def __init__(self, haiku):
        # comment about what each part of speech is:
        """ CC - conjunction: or, but, and, either
            CD - number: one, two, three
            DT - determiner: a, an, the, both, all, these, any, some
            EX - the word 'there'
            IN - preposition: in, of, with, for, under, among, upon, at
            JJ - adjective: certain, curious, little, golden, other, offended
            JJS - adjective: -est : best, loveliest, largest
            JJR - adjective: -er : lerger, smaller, worse
            MD - can, dare, should, will*, might, could, must
            NN - common singular noun
            NNS - common plural noun
            NNP - proper singular noun
            NNPS - proper plural noun
            PDT - all, both, quite, many, half
            PRP - hers, her, himself, thy, us, it, I, him, you, they
            PRPP - possesive: his, mine, our, my, her, its, your
            RB - adverb: very, not, here, there, first, just, down, again, beautifully, -ly
            RBR - more
            RBS - adverb superlative: -est
            RP - participle: up, down, out, away, over, off
            TO - the word 'to'
            UH - interjection
            VB - vocative verb: to ___
            VBD - past verb: -ed : was*(freq. occur), had, dipped, were, said, seemed
            VBG - present verb: -ing: trembling, trying, getting, running, swimming
            VBN - past verb descriptive: crowded, mutated, fallen, lit, lost, forgtten
            VBP - present verb: not -s: am, wish, make, know, do, find
            VBZ - present verb: -s : is*, has, seems
            WDT - what, which, that*
            WP - who, what
            WRB - how, whenever, where, why, when
        """
        # create base of cfg
        if not haiku:
            g = CFG.fromstring("""
            S -> NPS VPS | NPS VPS | NPS VPS | NPP VPP | VPO | NPO
            S -> NPS VPS | NPP VPP | NPS VPS
            NPS -> 'DT' 'NN' | 'DT' 'NN' | 'DT' 'JJ' 'NN' | 'DT' 'JJ' 'NN'
            NPS -> 'EX' 'the' 'NN' | 'the' 'JJS' 'NN'
            NPS -> 'she' | 'he' | 'it' | 'I'
            NPS -> NPS INP | INP NPS
            NPP -> 'the' 'NNS' | 'the' 'NNS' | 'NNS'
            NPP -> 'the' 'JJ' 'NNS'
            NPP -> 'they' | 'you' | 'we'
            VING -> 'VBG' | 'VBG' | 'RB' 'VBG'
            VBB -> 'VB' | 'VB' | 'VBP'
            SM -> 'is' | 'was' | 'has been'
            VPS -> SM 'VBN' | SM 'VBN' 'like the' 'JJ' 'NN'
            VPS -> SM VING | SM VING INP
            VPS -> SM VING 'like' 'DT' 'JJ' 'NN'
            VPS -> SM VING 'like a' 'NN' INP
            VPS -> SM 'as' 'JJ' 'as' 'JJ'
            VPS -> SM 'a' 'JJ' 'NN'
            VPS -> SM 'a' 'NN' INP
            VPS -> 'MD' 'have been' VING
            VPS -> 'is' 'JJ' 'and' 'JJ'
            VPS -> 'VBD' INP | 'RB' 'VBD'
            VPS -> SM 'VBD' 'like' 'DT' 'JJ' 'NN'
            VPS -> SM 'as' 'JJ' 'as the' 'NN'
            VPS -> 'VBD' 'NN' | 'VBD' 'DT' 'NN'
            VPS -> 'VBD' 'and' 'VBD' INP 'until' 'VBN'
            VPS -> VPS 'and' S
            VPS -> 'VBD' 'JJR' 'than' 'a' 'NN'
            VPS -> 'VBD' 'EX'
            VPS -> SM 'JJ' | SM 'VB' INP
            NPO -> 'a' 'NN' 'IN' 'NNP'
            NPO -> 'the' 'NN' 'IN' 'the' 'JJ' 'NNP'
            NPO -> 'the' 'NNS' 'IN' 'the' 'NN'
            VPO -> 'VBG' 'like' 'NNP' 'RP' 'DT' 'JJ' 'NN' 'IN' 'DT' 'NN'
            VPO -> 'has been' 'VBG' 'RP' 'and' 'VBG'
            PM -> 'are' | 'were' | 'have been'
            VPP -> PM VING | PM VING INP
            VPP -> PM VING 'like the' 'NNS' INP
            VPP -> PM 'as' 'JJ' 'as' NPS INP | PM 'JJ' 'like' 'NNS' | PM 'JJ' 'like' VBG 'NNS'
            VPP -> PM 'VBN' | PM 'VBN' INP
            VPP -> PM 'as' 'JJ' 'as' 'JJ' | PM 'as' 'JJ' 'as' 'VBG' 'NNS'
            VPP -> PM 'NNS' INP
            VPP -> PM 'JJ' 'NNS'
            VPP -> 'are' 'JJ' 'and' 'JJ'
            VPP -> 'VBD' INP | 'VBD' 'RP' INP
            VPP -> PM 'JJ' | PM 'VB' INP
            INP -> 'IN' 'DT' 'NN' | 'IN' 'the' 'NNS' | 'IN' 'the' 'JJ' 'NNS'
            INP -> 'IN' 'DT' 'NN' 'IN' 'DT' 'NN'
            INP -> 'IN' 'DT' 'JJ' 'NN' | 'RP' 'IN' 'DT' 'JJ' 'NN'
            INP -> 'RP' 'IN' 'DT' 'NN' | 'IN' 'JJ' 'NNS'
            INP -> 'IN' 'DT' 'NN' | 'RP' 'DT' 'NNS'
            """)
            # Round-trip through str() drops the "Grammar with N productions"
            # header line, then save grammar to self.cfg.
            self.cfg = CFG.fromstring(str(g).split('\n')[1:])
            self.cfg._start = g.start()
        elif haiku:
            g2 = CFG.fromstring("""
            S -> 'DT' 'JJ' 'NNS'
            S -> 'VBD' 'NNS'
            S -> 'NNS' 'VBD'
            """)
            self.cfg = CFG.fromstring(str(g2).split('\n')[1:])
            self.cfg._start = g2.start()

    def gen_frame_line(self, nt):
        """Recursively expand nonterminal *nt* into a space-separated line.

        Productions are tried in random order; one is used only if every
        nonterminal on its right-hand side can itself be expanded.

        Returns:
            str: the generated line, or " " if nothing was expandable.
        """
        sentence = ''
        prods = random.sample(self.cfg.productions(lhs=nt), len(self.cfg.productions(lhs=nt)))
        for prod in prods:
            # BUG FIX: 'valid' is now computed per production; previously it
            # was set once before the loop, so one unexpandable production
            # poisoned the flag for every later candidate.
            valid = all(
                not is_nonterminal(sym) or len(self.cfg.productions(lhs=sym)) >= 1
                for sym in prod.rhs()
            )
            if valid:
                for sym in prod.rhs():
                    if is_nonterminal(sym):
                        sentence += self.gen_frame_line(sym)
                    else:
                        sentence += sym + ' '
                return sentence
        return " "  # no expandable production found
class Spot:
    """One slot in a poem frame: a fixed word or an open POS slot."""

    def __init__(self, wop, line, column, content):
        """Create a spot from a word-or-POS token.

        Args:
            wop (str): the word or the POS tag, depending on *content*
            line (int): row index within the frame
            column (int): position within the line
            content (str): 'POS' for an open slot, 'word' for a fixed word
        """
        if content == 'POS':
            self.word, self.POS = '', wop
            self.line, self.column = line, column
            self.filled = self.preset = False
        elif content == 'word':
            self.word, self.POS = wop, ''
            self.line, self.column = line, column
            self.filled = self.preset = True
        else:
            print(" ")  # spot content error

    def fill(self, word):
        """Place *word* into this spot and mark it filled."""
        self.word = word
        self.filled = True

    def add_POS(self, pos):
        """Attach a POS tag to this spot."""
        self.POS = pos
class Frame:
    """Grid of Spot objects forming the poem skeleton.

    The frame is built from grammar-generated lines of POS tags; the
    filler methods then replace open POS slots with source-text words.
    """

    def __init__(self, grammar, tags, length, haiku):
        """Build *length* lines; lines x and y reuse one shared template.

        Args:
            grammar (Grammar): supplies gen_frame_line()
            tags: iterable of valid POS tags
            length (int): number of lines in the poem
            haiku (bool): haiku mode pins the repeated lines to 3 and 2
        """
        self.lines = []
        # Template reused for the "repeat" lines (a refrain of sorts).
        repeat_line_array = nltk.word_tokenize(grammar.gen_frame_line(grammar.cfg.start()))
        if haiku == True:
            # NOTE(review): with length == 3 the index x == 3 is never hit,
            # so only line 2 uses the repeat template -- confirm intended.
            x = 3
            y = 2
        elif haiku == False:
            x = random.randint(0, length)
            y = random.randint(0, length)
        for i in range(length):
            if (i == x or i == y):
                spot_array = []
                j = 0
                noun_set = set(['he', 'she', 'it', 'I'])
                for wop in repeat_line_array:
                    if wop in set(tags):
                        spot = Spot(wop, i, j, 'POS')
                        # NOTE(review): pronouns are words, not tags, so this
                        # branch looks unreachable for them -- TODO confirm.
                        if (wop in noun_set):
                            spot.add_POS('NN')
                        spot_array.append(spot)
                    else:
                        spot_array.append(Spot(wop, i, j, 'word'))
                    j += 1
                self.lines.append(spot_array)
            else:
                line_array = nltk.word_tokenize(grammar.gen_frame_line(grammar.cfg.start()))
                spot_array = []
                j = 0
                for wop in line_array:
                    if wop in set(tags):
                        spot_array.append(Spot(wop, i, j, 'POS'))
                    else:
                        spot_array.append(Spot(wop, i, j, 'word'))
                    j += 1
                self.lines.append(spot_array)

    def add_collocations(self, text):
        """Place collocation word pairs into adjacent slots with matching tags."""
        collocations = text.get_collocations()
        tagged_collocation_list = [nltk.pos_tag(c) for c in collocations]
        for tagged_collocation in tagged_collocation_list:
            POS_pair = [tagged_collocation[0][1], tagged_collocation[1][1]]
            word_pair = [tagged_collocation[0][0], tagged_collocation[1][0]]
            collocation_used = False
            for line in self.lines:
                if collocation_used == False:
                    for i in range(len(line) - 1):  # 0 to line.length-2
                        # BUG FIX: compare the spots' POS tags; the old code
                        # compared tag strings against Spot objects, which
                        # could never be equal, so no collocation was placed.
                        if POS_pair == [line[i].POS, line[i + 1].POS]:
                            line[i].fill(word_pair[0])
                            line[i + 1].fill(word_pair[1])
                            collocation_used = True
                            break

    def add_big_words(self, text):
        """Fill open slots with long (>6 chars), frequent (>2) source words."""
        fdist = nltk.FreqDist(text.text_array)
        big_words = [w for w in set(text.text_array) if len(w) > 6 and fdist[w] > 2]
        big_words_with_tags = nltk.pos_tag(big_words)
        big_word_buckets = {tag: [] for tag in text.tags}
        for word, tag in big_words_with_tags:
            big_word_buckets[tag].append(word)
        used_words = []
        for line in self.lines:
            for spot in line:
                if spot.filled == False and len(big_word_buckets[spot.POS]) > 0:
                    candidate = random.choice(big_word_buckets[spot.POS])
                    # Each big word is used at most once across the poem.
                    if candidate not in used_words:
                        spot.fill(candidate)
                        used_words.append(candidate)

    def repeat_nouns(self, length):
        """Copy the first line's filled noun into one NN slot of every line."""
        noun = ''
        for spot in self.lines[0]:
            if spot.POS == 'NN' and spot.filled == True:
                noun = spot.word
                break
        if noun == '': return
        for i in range(1, length):
            for spot in self.lines[i]:
                if spot.POS == 'NN' and spot.filled == False:
                    spot.fill(noun)
                    break

    def add_context_words(self, text):
        """Fill neighbours of filled spots using words adjacent in the text."""
        for line in self.lines:
            for spot in line:
                if spot.filled == True:
                    if spot.column > 0 and line[spot.column - 1].filled == False and spot.preset == False:
                        for before_word in text.before[spot.word]:
                            if line[spot.column - 1].POS == nltk.pos_tag([before_word])[0][1]:
                                line[spot.column - 1].fill(before_word)
                                break
                    if spot.column < len(line) - 1 and line[spot.column + 1].filled == False and spot.preset == False:
                        for after_word in text.after[spot.word]:
                            if line[spot.column + 1].POS == nltk.pos_tag([after_word])[0][1]:
                                line[spot.column + 1].fill(after_word)
                                break

    def add_random(self, text):
        """Fill one random open slot with a random word of the right POS.

        NOTE(review): loops forever when every spot is already filled --
        callers must ensure at least one open slot remains.
        """
        while True:
            # BUG FIX: bounds now follow the actual frame size. The old
            # randint(0, 8) / randint(0, len(line)) are inclusive on both
            # ends and could index past the end of lines/line.
            row = random.randrange(len(self.lines))
            col = random.randrange(len(self.lines[row]))
            spot = self.lines[row][col]
            if not spot.filled:
                spot.fill(random.choice(text.POS_buckets[spot.POS]))
                return

    def add_first_unfilled(self, text):
        """Fill the first open slot of each line with a random matching word."""
        for line in self.lines:
            for spot in line:
                if spot.filled == False:
                    n = random.randint(0, len(text.POS_buckets[spot.POS]) - 1)
                    spot.fill(text.POS_buckets[spot.POS][n])
                    break

    def fill_remaining(self, text):
        """Fill every remaining open slot with a random matching word."""
        for line in self.lines:
            for spot in line:
                if not spot.filled:
                    n = random.randint(0, len(text.POS_buckets[spot.POS]) - 1)
                    spot.fill(text.POS_buckets[spot.POS][n])

    def print(self):
        """Print the frame, one row per line; open slots show their POS tag."""
        for line in self.lines:
            for spot in line:
                print(spot.word if spot.filled else spot.POS, end=" ")
            print()
        print()
# ------------------------------------------- TTS Setup -----------------------------------------------------------
class TTS:
    """Speech input/output helpers: microphone capture, TTS, recording."""

    def take_command() -> object:
        """Listen on the microphone and return the recognised command.

        Returns:
            str: lowercased command with the wake word 'edgar' stripped,
            or 'No voice identified!\\n' when recognition failed.
        """
        # BUG FIX: default set before the try block -- previously 'command'
        # was unbound (NameError) if sr.Microphone() itself raised.
        command = 'No voice identified!\n'
        try:
            with sr.Microphone() as source:
                print('Gathering audio input!\n')
                voice = listener.listen(source)  # , phrase_time_limit=10000) # argument to listen a given time
                command = listener.recognize_google(voice)
                command = command.lower()
                if 'edgar' in command:
                    command = command.replace('edgar', '')
                print(command + '\n')
        except Exception:
            # Best effort: any recognition failure falls back to the default.
            pass
        return command

    def talk(text):
        """Speak *text* through the pyttsx3 engine (blocking)."""
        engine.say(text)
        engine.runAndWait()

    def record_audio():
        """Record the mic to recording_audio_temp.wav.

        Runs for up to an hour or until the hardware stop button reads
        above 500; the red LED is lit while recording.
        """
        chunk = 1024  # Record in chunks of 1024 samples
        sample_format = pyaudio.paInt16  # 16 bits per sample
        channels = 2
        fs = 44100  # Record at 44100 samples per second
        seconds = 3600  # 1h
        filename = "recording_audio_temp.wav"
        p = pyaudio.PyAudio()  # Create an interface to PortAudio
        print('Recording')
        board.digitalWrite(red_led_pin, 'HIGH')
        stream = p.open(format=sample_format,
                        channels=channels,
                        rate=fs,
                        frames_per_buffer=chunk,
                        input=True)
        frames = []  # Initialize array to store frames
        for i in range(0, int(fs / chunk * seconds)):
            frames.append(stream.read(chunk))
            # FIX: single analog read per iteration (it used to read twice).
            if board.analogRead(button_pin) > 500:
                break
        # Stop and close the stream, then release PortAudio.
        stream.stop_stream()
        stream.close()
        p.terminate()
        board.digitalWrite(red_led_pin, 'LOW')
        print('Finished recording')
        # Save the recorded data as a WAV file.
        wf = wave.open(filename, 'wb')
        wf.setnchannels(channels)
        wf.setsampwidth(p.get_sample_size(sample_format))
        wf.setframerate(fs)
        wf.writeframes(b''.join(frames))
        wf.close()
        print('Recording saved to file\n')

    def audio_file_chunks():
        """Split the last recording into 10 s chunks and transcribe them."""
        source_audio = AudioSegment.from_file("recording_audio_temp.wav", "wav")
        chunk_length_ms = 10000
        chunks = make_chunks(source_audio, chunk_length_ms)
        for i, chunk in enumerate(chunks):
            chunk_name = "chunk{0}.wav".format(i)
            print("exporting", chunk_name)
            chunk.export(chunk_name, format="wav")
        i = 0
        original_stdout = sys.stdout
        try:
            with open('recording_audio_temp.txt', 'w+') as recording_temp:
                # Transcriptions are print()ed into the text file.
                sys.stdout = recording_temp
                for chunk in chunks:
                    chunk_silent = AudioSegment.silent(duration=10)
                    audio_chunk = chunk_silent + chunk + chunk_silent
                    audio_chunk.export("./chunk{0}.wav".format(i), bitrate='192k', format="wav")
                    filename = 'chunk' + str(i) + '.wav'
                    r = sr.Recognizer()
                    with sr.AudioFile(filename) as source:
                        audio = r.listen(source)
                    try:
                        print(r.recognize_google(audio))
                    except sr.UnknownValueError:
                        print(" ")
                    except sr.RequestError as e:
                        print(" ".format(e))
                    i += 1
        finally:
            # BUG FIX: restore stdout -- it used to stay bound to the closed
            # file, so the next print() raised ValueError.
            sys.stdout = original_stdout
        print('Exporting done')

    def read_last_poem():
        """Read poems_last.txt aloud at the slower poetry rate."""
        engine.setProperty('rate', poetry_voice_rate)  # slow down voice for poetry reading
        with open('poems_last.txt') as poems_last:
            string_without_line_breaks = ' '
            for line in poems_last:
                string_without_line_breaks += line.rstrip() + ' '
        TTS.talk(str(string_without_line_breaks))
        engine.setProperty('rate', normal_voice_rate)  # back to normal voice rate
# ----------------------------------------------------------- Functions Declaration -----------------------------------------------------------
class Functions:
    """High-level actions wired to the voice commands."""

    def clean_source_text(source):
        """Read *source* and strip digits, punctuation and line breaks.

        Args:
            source (str): path of the text file to clean

        Returns:
            str: the cleaned text, sentences joined by spaces
        """
        # BUG FIX: the file handle used to leak; 'with' closes it.
        with open(source, 'r+', encoding='utf-8') as file:
            text = file.read()
        text = re.sub('[0-9]+', '', str(text))
        text = re.sub('\n ', '', str(text))
        text = re.sub('\n', ' ', str(text))
        text = re.sub('_', '', str(text))
        text = re.sub(':', '', str(text))
        text = re.sub("'s", '', str(text))
        text = re.sub("-", ' ', str(text))
        text = re.sub("— ", '', str(text))
        text = re.sub('\"', '', str(text))
        sentences = re.split('[.?!]', text)
        return ' '.join(sentences)

    def run_generator(source, length, haiku):
        """Generate a poem from *source* and write it to poems_last.txt.

        Args:
            source (str): path of the source text
            length (int): number of poem lines
            haiku (bool): haiku mode flag, forwarded to Grammar/Frame
        """
        with open(source, 'r+', encoding='utf-8') as file:
            read_source = file.read()
        text = Text(read_source)  # separates words into POS buckets
        grammar = Grammar(haiku)  # makes CFG
        frame = Frame(grammar, text.tags, length, haiku)  # frame of the poem: lists of POS tags
        frame.add_collocations(text)
        frame.add_big_words(text)
        frame.repeat_nouns(length)
        # NOTE(review): loop body reconstructed as the two calls below --
        # confirm against the original formatting.
        for _ in range(3):
            frame.add_context_words(text)
            frame.add_first_unfilled(text)
        frame.repeat_nouns(length)
        frame.add_context_words(text)
        frame.fill_remaining(text)
        original_stdout = sys.stdout
        try:
            with open('poems_last.txt', 'w+') as file_last_poem:
                sys.stdout = file_last_poem
                frame.print()
        finally:
            # BUG FIX: restore stdout -- it used to stay bound to the closed
            # poem file, making every later print() raise ValueError.
            sys.stdout = original_stdout

    def save_poem_database():
        """Append the last poem to poems_all.txt, flashing the store LED."""
        board.digitalWrite(white_led_pin, "HIGH")
        time.sleep(2)
        # BUG FIX: both files are now closed via 'with'.
        with open('poems_all.txt', 'a+') as poem_database:
            with open('poems_last.txt', 'r+') as file_last_poem:
                last_poem_temp = file_last_poem.readlines()
            poem_database.write('**#**\n')  # poem separator marker
            for line in last_poem_temp:
                poem_database.write(line)
            poem_database.write('\r\n')
        print('Poem stored!\n')
        time.sleep(2)
        board.digitalWrite(white_led_pin, "LOW")

    def retrieve_from_database():
        """Pick a random stored poem and read it aloud."""
        with open('poems_all.txt', "r") as fh:
            raw_text = fh.read()
        poems = raw_text.replace('\n', '').split('**#**')
        poems.remove('')  # drop the empty chunk before the first separator
        poem = random.choice(tuple(poems))
        print(poem)
        engine.setProperty('rate', poetry_voice_rate)
        TTS.talk(poem)
        engine.setProperty('rate', normal_voice_rate)

    def run_edgar():
        """Handle one voice command: listen/read/save/retrieve/generate."""
        command = TTS.take_command()  # take audio input
        if command != ('No voice identified!\n'):
            time.sleep(1)
            # Boot sequence: sweep both servos to show a command was heard.
            board.Servos.write(choice_servo_pin, 90)
            board.Servos.write(type_servo_pin, 90)
            time.sleep(0.5)
            board.Servos.write(type_servo_pin, 0)
            board.Servos.write(choice_servo_pin, 180)
            time.sleep(0.5)
            board.Servos.write(type_servo_pin, 180)
            board.Servos.write(choice_servo_pin, 0)
            time.sleep(0.5)
            board.Servos.write(type_servo_pin, 90)
            board.Servos.write(choice_servo_pin, 90)
            time.sleep(1)
            if 'listen' in command:
                TTS.record_audio()
                TTS.audio_file_chunks()
            elif 'read' in command:
                TTS.read_last_poem()
            elif 'save' in command:
                Functions.save_poem_database()
            elif 'retrieve' in command:
                Functions.retrieve_from_database()
            elif 'generate' in command:
                time.sleep(1)
                # Source text selection (servo position shows the choice).
                if 'shakespeare' in command:
                    source = 'shakespeare.txt'
                    print('Text Source: Shakespeare\n')
                    board.Servos.write(choice_servo_pin, 160)
                    time.sleep(1)
                elif 'bible' in command:
                    source = 'bible.txt'
                    print('Text Source: The Bible\n')
                    board.Servos.write(choice_servo_pin, 70)
                    time.sleep(1)
                elif 'recorded' in command:
                    source = 'recording_audio_temp.txt'
                    print('Text Source: Audio Recording\n')
                    board.Servos.write(choice_servo_pin, 20)
                    time.sleep(1)
                else:
                    # FIX: typo in printed name ('AlLan' -> 'Allan').
                    source = 'poe_all.txt'
                    print('Text Source: Edgar Allan Poe\n')
                    board.Servos.write(choice_servo_pin, 110)
                    time.sleep(1)
                # Poem type / length selection.
                if 'haiku' in command:
                    haiku = True
                    length = 3
                    print('Poetry type: haiku\n')
                    board.Servos.write(type_servo_pin, 20)
                    time.sleep(1)
                elif 'short' in command:
                    haiku = False
                    length = 4
                    print('Poetry type: Free Form\n')
                    print('Poetry length: Short\n')
                    board.Servos.write(type_servo_pin, 70)
                    time.sleep(1)
                elif 'long' in command:
                    haiku = False
                    length = 12
                    print('Poetry type: Free Form\n')
                    print('Poetry length: Long\n')
                    board.Servos.write(type_servo_pin, 160)
                    time.sleep(1)
                else:
                    haiku = False
                    length = 8
                    print('Poetry type: Free Form\n')
                    print('Poetry length: Medium\n')
                    board.Servos.write(type_servo_pin, 110)
                    time.sleep(1)
                Functions.run_generator(source, length, haiku)
                TTS.read_last_poem()
        else:
            TTS.talk('I was not able to understand the command.\n')
            print('I was not able to understand the command.\n')
        # Reset both servos to neutral after every command.
        time.sleep(1)
        board.Servos.write(choice_servo_pin, 90)
        time.sleep(1)
        board.Servos.write(type_servo_pin, 90)
        time.sleep(1)
# ------------------------------------- Generator Main run -----------------------------------------------------------
if __name__ == "__main__":
    # Run the voice-command loop forever; each iteration handles one command.
    while True:
        Functions.run_edgar()
# Financial Modeling Prep API configuration (base URL derived from BASE_URL).
BASE_URL = "https://financialmodelingprep.com/api"
INDEX_PREFIX = "^"  # symbols starting with this prefix denote indices
# Chart intervals accepted for intraday data.
SUPPORTED_INTERVALS = ["1min","5min","15min","30min","1hour","4hour"]
# Endpoint path segments the wrapper accepts as a category.
SUPPORTED_CATEGORIES = [
    'profile',
    'quote',
    'quote-short',
    'quotes',
    'search',
    'search-ticker',
    'income-statement',
    'balance-sheet-statement',
    'cash-flow-statement',
    'ratios',
    'enterprise-values',
    'key-metrics',
    'financial-growth',
    'rating',
    'discounted-cash-flow',
    'historical-discounted-cash-flow',
    'stock',
    'earning_calendar',
    'historical',
    'historical-chart',
    'historical-price-full',
    'stock-screener',
    'rss_feed',
    'sp500_constituent',
    'actives',
    'gainers',
    'losers',
    'market-hours',
    'sectors-performance',
    'financial-statement-symbol-lists',
    'historical/earning_calendar',
    'earning_call_transcript',
    'analyst-estimates'
]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import seaborn as sns
from torchvision import datasets, transforms, models
import argparse
import json
from PIL import Image
import time
import argparse
def args():
    """Parse the command-line options for the image-classifier trainer.

    Returns:
        argparse.Namespace: the parsed arguments (all optional).
    """
    parser = argparse.ArgumentParser(description='Image Classifier Application')
    options = (
        ('--save_dir', str, 'define the save directory(string) for checkpoints'),
        ('--arch', str, 'choose the pre-trained model from torchvision.models'),
        ('--learning_rate', float, 'define the learning rate as float'),
        ('--hidden_units', int, 'define the number of hidden units for model.classifier as int'),
        ('--epochs', int, 'define the number of epochs for training as int'),
        ('--gpu', str, 'Use GPU for training'),
    )
    for flag, flag_type, text in options:
        parser.add_argument(flag, type=flag_type, help=text)
    return parser.parse_args()
def train_transformer(train_dir):
    """Build the augmented training dataset from *train_dir*."""
    augmentation = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    return datasets.ImageFolder(train_dir, transform=augmentation)
def train_loader(train_dataset):
    """Wrap *train_dataset* in a shuffling DataLoader (batch size 64)."""
    return torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
def val_transformer(val_dir):
    """Build the (non-augmented) validation dataset from *val_dir*."""
    preprocessing = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    return datasets.ImageFolder(val_dir, transform=preprocessing)
def val_loader(val_dataset):
    """Wrap *val_dataset* in a sequential DataLoader (batch size 64)."""
    return torch.utils.data.DataLoader(val_dataset, batch_size=64)
def test_transformer(test_dir):
    """Build the (non-augmented) test dataset from *test_dir*."""
    preprocessing = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    return datasets.ImageFolder(test_dir, transform=preprocessing)
def test_loader(test_dataset):
testloader = torch.utils.data.DataLoader(test_dataset, batch_size = 64)
return testloader
def check_gpu(gpu):
    """Select the torch device to train on.

    Args:
        gpu: the --gpu CLI value. NOTE(review): the flag is parsed as a
            string, so only the literal False forces the CPU here -- even
            passing "False" falls through to the CUDA check. Confirm the
            intended semantics of --gpu.

    Returns:
        torch.device: cuda if available (unless gpu is False), else cpu.
    """
    if gpu == False:
        return torch.device('cpu')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # BUG FIX: compare the device *type*; 'device == "cpu"' compared a
    # torch.device against a str, so the fallback message could never/ 
    # unreliably fire depending on the torch version.
    if device.type == "cpu":
        print("no gpu found, using cpu")
    return device
def model_load(architecture='densenet121'):
    """Load a pretrained torchvision model and freeze its features.

    Args:
        architecture: name of a torchvision.models constructor;
            None selects densenet121.

    Returns:
        the pretrained model with requires_grad disabled and .name set.
    """
    if architecture is None:
        architecture = 'densenet121'
        print('desenet121 is used as network architecture')
    # BUG FIX: the old exec() string was malformed ('pretrained+True',
    # missing ')') and an exec() assignment does not bind a local name.
    model = getattr(models, architecture)(pretrained=True)
    model.name = architecture
    # Freeze the feature extractor; only the new classifier will train.
    for param in model.parameters():
        param.requires_grad = False
    return model
def model_classifier(model, hidden_units):
    """Build a new classifier head mapping model features to 102 classes.

    Args:
        model: pretrained network whose .classifier.in_features is read
        hidden_units: size of the hidden layer; None selects 512

    Returns:
        nn.Sequential: Linear -> ReLU -> Dropout(0.3) -> Linear -> LogSoftmax
    """
    if type(hidden_units) == type(None):
        hidden_units = 512
        print("model uses 512 hidden units")
    input_features = model.classifier.in_features
    return nn.Sequential(
        nn.Linear(input_features, hidden_units),
        nn.ReLU(),
        nn.Dropout(0.3),
        nn.Linear(hidden_units, 102),
        nn.LogSoftmax(dim=1),
    )
def nn_trainer(Model, Trainloader, Validloader, Device, Criterion, Optimizer, Epochs):
    """Train *Model*, running a validation pass after every epoch.

    Args:
        Model: network to train (modified in place)
        Trainloader / Validloader: iterables of (images, labels) batches
        Device: torch.device the tensors are moved to
        Criterion: loss function
        Optimizer: optimiser bound to Model's trainable parameters
        Epochs: number of epochs; None selects 5

    Returns:
        the trained Model.
    """
    if Epochs is None:
        Epochs = 5
        print("5 epochs will be used in training")
    for epoch in range(Epochs):
        # BUG FIX: re-enable training mode each epoch -- the eval() from the
        # previous epoch's validation used to stay active, silently
        # disabling dropout from the second epoch onwards.
        Model.train()
        running_loss = 0
        for images, labels in Trainloader:
            images, labels = images.to(Device), labels.to(Device)
            Optimizer.zero_grad()
            output = Model.forward(images)
            loss = Criterion(output, labels)
            loss.backward()
            Optimizer.step()
            running_loss += loss.item()
        # Validation (this used to hide in an obscure for/else clause).
        val_loss = 0
        val_accuracy = 0
        Model.eval()
        with torch.no_grad():
            for images, labels in Validloader:
                images, labels = images.to(Device), labels.to(Device)
                outputs = Model.forward(images)
                val_loss += Criterion(outputs, labels).item()
                # argmax of the raw outputs equals argmax of exp(outputs)
                equals = (labels.data == outputs.max(dim=1)[1])
                val_accuracy += torch.mean(equals.type(torch.FloatTensor))
        print(f"Epoch {epoch+1}/{Epochs}.. "
              f"training loss: {running_loss/len(Trainloader):.3f}.. "
              f"validation loss: {val_loss/len(Validloader):.3f}.. "
              f"validation accuracy: {val_accuracy/len(Validloader):.3f}..")
    return Model
def testing(model, testloader, criterion, device):
model.to(device)
model.eval()
test_loss = 0
test_accuracy = 0
for images, labels in testloader:
images, labels = images.to(device), labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
test_loss += loss.item()
ps = torch.exp(outputs)
top_p, top_class = ps.topk(1, dim = 1)
equals = (labels.data == ps.max(dim=1)[1])
test_accuracy += torch.mean(equals.type(torch.FloatTensor))
print(f"Test loss: {test_loss/len(testloader):.3f}")
print(f"Test accuracy: {test_accuracy/len(testloader) * 100:.1f}%")
def save_checkpoint(Model, Dir, Train_data):
    """Persist the model to 'checkpoint.pth' unless no directory was given.

    Args:
        Model: trained model (state_dict/classifier/name are stored)
        Dir: the --save_dir CLI value; None skips saving entirely
        Train_data: training dataset whose class_to_idx mapping is stored

    NOTE(review): 'Dir' is otherwise unused -- the checkpoint always goes
    to ./checkpoint.pth; confirm whether it should be joined with Dir.
    """
    if Dir is None:
        # BUG FIX: previously this branch only printed the warning and then
        # fell through to torch.save anyway.
        print("no directory, model will not be saved")
        return
    torch.save({
        'train_indices': Train_data.class_to_idx,
        'model_state_dict': Model.state_dict(),
        'model_cpu': Model.cpu,
        'model_cuda': Model.cuda,
        'classifier': Model.classifier,
        'architecture': Model.name
    }, 'checkpoint.pth')
# combining all functions
def main():
    """Wire together argument parsing, data loading, training and saving."""
    arg = args()
    # Data directories and loaders.
    data_dir = 'flowers'
    train_dir = data_dir + '/train'
    val_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    train_data = train_transformer(train_dir)
    val_data = val_transformer(val_dir)
    test_data = test_transformer(test_dir)
    trainloader = train_loader(train_data)
    valloader = val_loader(val_data)
    testloader = test_loader(test_data)
    # Model, classifier head and device.
    model = model_load(architecture=arg.arch)
    model.classifier = model_classifier(model, hidden_units=arg.hidden_units)
    device = check_gpu(arg.gpu)
    model.to(device)
    # Learning rate (idiom fix: 'is None' instead of type() comparison).
    if arg.learning_rate is None:
        learning_rate = 0.003
        print("learning rate selected as 0.003")
    else:
        learning_rate = arg.learning_rate
    # Criterion and optimizer (only the classifier head is optimised).
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    # Train, evaluate, save.
    start = time.time()
    nn_trainer(model, trainloader, valloader, device, criterion, optimizer, arg.epochs)
    print(f"Time taken: {time.time() - start:.3f} sec")
    testing(model, testloader, criterion, device)
    save_checkpoint(model, arg.save_dir, train_data)


# run
if __name__ == '__main__':
    main()
from redis import Redis
import time
import threading
def notrans():
    """Increment the 'notrans:' counter, wait, then decrement it.

    Demonstrates a non-transactional window: between the two incr calls
    other threads observe the raised value.
    """
    conn = Redis("127.0.0.1", 6379)
    # FIX: print() as a function -- the original used Python 2 syntax,
    # which is a SyntaxError under Python 3.
    print(conn.incr('notrans:'))
    time.sleep(.1)
    conn.incr('notrans:', -1)
def __main__():
    """Spawn three competing notrans() threads, then wait half a second."""
    # FIX: range() -- Python 2's xrange does not exist in Python 3.
    for _ in range(3):
        threading.Thread(target=notrans).start()
    time.sleep(.5)
import argparse
import os
from pd_mesh_net.utils import BaseTrainingJob
if __name__ == '__main__':
    # Resume/extend a pd_mesh_net training job from a saved folder.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--f',
        type=str,
        help="Path to the folder containing the pretrained model to evaluate "
        "and the training parameters.",
        required=True)
    parser.add_argument(
        '--checkpoint_batch_frequency',
        type=int,
        help=
        "Frequency (in batches) of checkpoint saving; if passed, overrides the "
        "argument `checkpoint_epoch_frequency`.")
    parser.add_argument('--checkpoint_epoch_frequency',
                        type=int,
                        help="Frequency (in epochs) of checkpoint saving.",
                        default=1)
    parser.add_argument('--last_epoch',
                        type=int,
                        help="Last training epoch.",
                        required=True)
    parser.add_argument('--verbose',
                        help="If passed, will display verbose prints.",
                        action='store_true')
    args = parser.parse_args()
    training_job_folder = os.path.abspath(args.f)
    # FIX: raise instead of assert -- asserts are stripped under python -O.
    if not os.path.exists(training_job_folder):
        raise FileNotFoundError(
            f"Could not find the training job folder {training_job_folder}.")
    log_folder = os.path.dirname(training_job_folder)
    training_job_name = os.path.basename(training_job_folder)
    checkpoint_batch_frequency = None
    checkpoint_epoch_frequency = args.checkpoint_epoch_frequency
    if (args.checkpoint_batch_frequency):
        # Batch frequency overrides epoch frequency.
        checkpoint_batch_frequency = args.checkpoint_batch_frequency
        checkpoint_epoch_frequency = None
        # BUG FIX: the message was a plain string, so the {placeholder} was
        # printed literally; it is now an f-string (and says "batches").
        print("Saving checkpoints in epoch-and-batch format, with a checkpoint "
              f"saved every {checkpoint_batch_frequency} batches.")
    # Create training job.
    training_job = BaseTrainingJob(
        final_training_epoch=args.last_epoch,
        log_folder=log_folder,
        checkpoint_batch_frequency=checkpoint_batch_frequency,
        checkpoint_epoch_frequency=checkpoint_epoch_frequency,
        training_job_name=training_job_name,
        verbose=args.verbose)
    # Run training job.
    training_job.train()
"""empty message
Revision ID: bbb6eae59947
Revises: 11d65b51ea1d
Create Date: 2020-08-15 21:51:23.925183
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bbb6eae59947'       # this migration's id
down_revision = '11d65b51ea1d'  # previous migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the follow/friend self-referential join tables on ``users``
    and add the ``gender``, ``phone_number`` and ``photo_url`` columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('follow_relationships',
    sa.Column('id_', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('created_at', sa.TIMESTAMP(), nullable=True),
    sa.Column('updated_at', sa.TIMESTAMP(), nullable=True),
    sa.Column('left_id', sa.Integer(), nullable=False),
    sa.Column('right_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['left_id'], ['users.id_'], ),
    sa.ForeignKeyConstraint(['right_id'], ['users.id_'], ),
    sa.PrimaryKeyConstraint('id_')
    )
    op.create_table('friend_relationships',
    sa.Column('id_', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('created_at', sa.TIMESTAMP(), nullable=True),
    sa.Column('updated_at', sa.TIMESTAMP(), nullable=True),
    sa.Column('left_id', sa.Integer(), nullable=False),
    sa.Column('right_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['left_id'], ['users.id_'], ),
    sa.ForeignKeyConstraint(['right_id'], ['users.id_'], ),
    sa.PrimaryKeyConstraint('id_')
    )
    op.add_column('users', sa.Column('gender', sa.Boolean(), nullable=True))
    op.add_column('users', sa.Column('phone_number', sa.String(length=11), nullable=True))
    op.add_column('users', sa.Column('photo_url', sa.String(length=255), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert :func:`upgrade`: drop the added columns, then the two tables
    (reverse order of creation, so FK dependencies are respected)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'photo_url')
    op.drop_column('users', 'phone_number')
    op.drop_column('users', 'gender')
    op.drop_table('friend_relationships')
    op.drop_table('follow_relationships')
    # ### end Alembic commands ###
|
import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from util.metrics import PSNR, roll_2
import pytorch_msssim
from util.util import fft2, ifft2
from util.hfen import hfen
class DLMRIModel(BaseModel):
    """Deep-learning MRI reconstruction model.

    A generator network refines an initial reconstruction (``IDL``) of
    undersampled multi-coil k-space data; data consistency is enforced by a
    conjugate-gradient (CG) block between generator applications. An optional
    image-space discriminator (``D_I``) enables adversarial training in the
    'together' phase.
    """

    def name(self):
        return 'DLMRIModel'

    def gradient_penalty(self, y, x):
        """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
        weight = torch.ones(y.size()).to(self.device)
        dydx = torch.autograd.grad(outputs=y,
                                   inputs=x,
                                   grad_outputs=weight,
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]
        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = torch.sqrt(torch.sum(dydx ** 2, dim=1))
        return torch.mean((dydx_l2norm - 1) ** 2)

    # Initialize the model
    def initialize(self, opt):
        """Build networks, losses and optimizers according to ``opt`` and the
        current training phase ('generator' pretraining vs. 'together' GAN)."""
        BaseModel.initialize(self, opt)  # ATTENTION HERE: NEED TO ALTER THE DEFAULT PLAN
        self.netG_I = networks.define_G(opt, opt.input_nc, opt.output_nc,
                                        opt.ngf, opt.which_model_netG_I, opt.norm, not opt.no_dropout, opt.init_type,
                                        opt.init_gain, self.gpu_ids)
        if self.isTrain:
            if self.train_phase == 'generator':
                self.model_names = ['G_I']
                self.loss_names = ['G_I_L1', 'G_I_L2', 'SSIM', 'PSNR']
            else:
                self.model_names = ['G_I', 'D_I']
                self.loss_names = ['G_GAN_I', 'G_I_L1', 'G_I_L2', 'D_GAN_I', 'SSIM', 'PSNR']
        else:  # during test time, only load Gs
            self.model_names = ['G_I']
            self.loss_names = ['SSIM', 'PSNR', 'HFEN']
        if self.isTrain:
            # Both training phases visualize the same tensors (the original
            # if/else branches were identical).
            self.visual_names = ['kreal', 'Ireal', 'Ifake', 'Iunder', 'mask', 'Preal', 'IDL']
        else:
            self.visual_names = ['Ireal', 'Ifake', 'Iunder', 'mask', 'IDL']
        self.criterionL1 = torch.nn.L1Loss()
        self.criterionMSE = torch.nn.MSELoss()
        # Placeholder SSIM; set_input() rebuilds it with the per-sample range.
        self.ssim_loss = pytorch_msssim.SSIM(val_range=2)
        if self.isTrain:
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(list((self.netG_I.parameters())),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
        if self.isTrain and self.train_phase == 'together':
            self.no_wgan = opt.no_wgan
            self.no_wgan_gp = opt.no_wgan_gp
            if self.no_wgan_gp == False:
                # WGAN-GP: train the critic several times per generator step.
                self.disc_step = opt.disc_step
            else:
                self.disc_step = 1
            self.disc_model = opt.disc_model
            use_sigmoid = opt.no_lsgan
            if opt.disc_model == 'pix2pix':
                # Conditional discriminator: sees (reconstruction, zero-filled) pairs.
                self.netD_I = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf,
                                                opt.which_model_netD_I,
                                                opt.n_layers_D_I, opt.norm, use_sigmoid, opt.init_type,
                                                opt.init_gain,
                                                self.gpu_ids)
            if opt.disc_model == 'traditional':
                self.netD_I = networks.define_D(opt.output_nc, opt.ndf, opt.which_model_netD_I,
                                                opt.n_layers_D_I, opt.norm, use_sigmoid, opt.init_type,
                                                opt.init_gain,
                                                self.gpu_ids)
            self.loss_wgan_gp = opt.loss_wgan_gp
            self.fake_I_pool = ImagePool(opt.pool_size)
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, use_l1=not opt.no_l1gan).to(self.device)
            self.optimizer_D_I = torch.optim.Adam(self.netD_I.parameters(),
                                                  lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_D_I)

    def set_input(self, input):
        """Unpack a data-loader batch and derive the tensors used everywhere
        else: zero-filled recon, fully-sampled target, phase map, SSIM range."""
        self.kreal = input['kreal'].to(self.device)
        self.smap = input['smap'].to(self.device)
        self.mask = input['mask'].to(self.device)
        self.IDL = input['DLRecon'].to(self.device)
        self.AT = networks.OPAT(self.smap)  # adjoint (k-space -> image)
        self.A = networks.OPA(self.smap)    # forward (image -> k-space)
        self.Iunder = self.AT(self.kreal, self.mask)
        self.Ireal = self.AT(self.kreal, torch.ones_like(self.mask))
        self.scale = torch.mean(torch.abs(self.Iunder))
        # Phase from the (real, imag) channels of the fully-sampled image.
        self.Preal = torch.atan(self.Ireal[:, 1, :, :] / self.Ireal[:, 0, :, :]).unsqueeze(1)
        self.image_paths = input['path']
        # Rebuild SSIM with the dynamic range of this sample.
        self.ssim_loss = pytorch_msssim.SSIM(val_range=2 * torch.max(torch.abs(self.Ireal)).cpu().float()).to(self.device)

    def backward_G(self):
        """Generator loss: (optional) adversarial term + L1 + L2 - SSIM."""
        # First, G(A) should fake the discriminator
        if self.isTrain and self.train_phase == 'together':
            if self.disc_model == 'pix2pix':
                fake_AB_I = torch.cat((self.Ifake, self.Iunder), 1)
                pred_fake_I = self.netD_I(fake_AB_I)
            if self.disc_model == 'traditional':
                pred_fake_I = self.netD_I(self.Ifake)
            if self.no_wgan == False:
                self.loss_G_GAN_I = -pred_fake_I.mean()
            else:
                self.loss_G_GAN_I = self.criterionGAN(pred_fake_I, True)
        else:
            self.loss_G_GAN_I = 0
        self.loss_G_GAN_I = self.loss_G_GAN_I * self.opt.loss_GAN_I
        self.loss_G_GAN = self.loss_G_GAN_I
        self.loss_G_I_L1 = self.criterionL1(self.Ifake, self.Ireal) * self.opt.loss_content_I_l1
        self.loss_G_I_L2 = self.criterionMSE(self.Ifake, self.Ireal) * self.opt.loss_content_I_l2
        self.loss_G_CON_I = self.loss_G_I_L1 + self.loss_G_I_L2
        # SSIM is maximized, hence subtracted (loss_SSIM is set in forward()).
        self.loss_G = self.loss_G_CON_I + self.loss_G_GAN - self.loss_SSIM * self.opt.loss_ssim
        self.loss_G.backward()

    def backward_D(self):
        """Discriminator loss (vanilla/LSGAN, WGAN, or WGAN-GP variants)."""
        if self.disc_model == 'pix2pix':
            # Bug fix: this read self.fake_k_pool, which is never created in
            # this class (initialize() only builds fake_I_pool), raising
            # AttributeError on the pix2pix path.
            # NOTE(review): unlike backward_G, the pooled fake is NOT
            # concatenated with Iunder before being fed to D — confirm whether
            # that asymmetry is intended.
            fake_AB_I = self.fake_I_pool.query(self.Ifake)
            pred_real_I = self.netD_I(torch.cat((self.Ireal, self.Iunder), 1))
            pred_fake_I = self.netD_I(fake_AB_I.detach())
        if self.disc_model == 'traditional':
            fake_AB_I = self.fake_I_pool.query(self.Ifake)
            pred_real_I = self.netD_I(self.Ireal)
            pred_fake_I = self.netD_I(fake_AB_I.detach())
        if self.no_wgan == False:
            self.loss_D_GAN_fake_I = pred_fake_I.mean()
            self.loss_D_GAN_real_I = -pred_real_I.mean()
        elif self.no_wgan_gp == False:
            self.loss_D_GAN_fake_I = pred_fake_I.mean()
            self.loss_D_GAN_real_I = -pred_real_I.mean()
            # Gradient penalty on random interpolates between real and fake.
            alpha = torch.rand(self.Ireal.size(0), 1, 1, 1).to(self.device)
            x_hat = (alpha * self.Ireal.data + (1 - alpha) * self.Ifake.data).requires_grad_(True)
            out_src = self.netD_I(x_hat)
            self.d_loss_gp_I = self.gradient_penalty(out_src, x_hat) * self.loss_wgan_gp
        else:
            self.loss_D_GAN_fake_I = self.criterionGAN(pred_fake_I, False)
            self.loss_D_GAN_real_I = self.criterionGAN(pred_real_I, True)
        self.loss_D_GAN_I = 0.5 * (self.loss_D_GAN_fake_I + self.loss_D_GAN_real_I) * self.opt.loss_GAN_I
        self.loss_D_GAN = self.loss_D_GAN_I * self.opt.beta
        if self.no_wgan_gp == False:
            self.loss_D_GAN = self.loss_D_GAN + self.d_loss_gp_I
        self.loss_D_GAN.backward()

    def forward(self):
        """Unrolled reconstruction: alternate generator and CG data-consistency
        blocks starting from the IDL reconstruction; compute eval metrics."""
        # CG_A = networks.CG_A(self.opt.MODLtol, self.opt.MODLLambda)
        CG = networks.CG.apply
        # Ifake = self.Iunder # For S
        Ifake = self.IDL  # For B+S
        for ii in range(self.opt.num_blocks):
            Ifake1 = self.netG_I(Ifake)
            Ifake = CG(Ifake1, self.opt.MODLtol, self.opt.MODLLambda, self.smap, self.mask, self.Iunder)
        self.Ifake = Ifake
        # For 'explicit' data-consistency
        # Ifake = self.IDL
        # for ii in range(self.opt.num_blocks):
        #     Ifake1 = self.netG_I(Ifake)
        #     Ifake = CG(Ifake1 + self.IDL, self.opt.MODLtol, self.opt.MODLLambda, self.smap, self.mask,
        #                self.Iunder)
        # self.Ifake = Ifake + self.IDL
        self.loss_PSNR = PSNR(self.Ireal, self.Ifake)
        self.loss_SSIM = self.ssim_loss(self.Ireal, self.Ifake)
        self.loss_HFEN = hfen(self.Ireal, self.Ifake, window_size=11, size_average=True, full=False, device=self.device)
        # Reference metrics for the DL-only and zero-filled reconstructions.
        # if not self.isTrain:
        self.loss_SSIM_DL = self.ssim_loss(self.Ireal, self.IDL)
        self.loss_SSIM_under = self.ssim_loss(self.Ireal, self.Iunder)
        self.loss_PSNR_DL = PSNR(self.Ireal, self.IDL)
        self.loss_PSNR_under = PSNR(self.Ireal, self.Iunder)
        self.loss_HFEN_DL = hfen(self.Ireal, self.IDL, window_size=11, size_average=True, full=False,
                                 device=self.device)
        self.loss_HFEN_under = hfen(self.Ireal, self.Iunder, window_size=11, size_average=True, full=False,
                                    device=self.device)
        print('loss_SSIM_DL', self.loss_SSIM_DL, 'loss_SSIM_under', self.loss_SSIM_under, 'loss_PSNR_DL',
              self.loss_PSNR_DL, 'loss_PSNR_under', self.loss_PSNR_under, 'loss_HFEN_DL', self.loss_HFEN_DL,
              'loss_HFEN_under', self.loss_HFEN_under)

    def optimize_parameters(self):
        """One optimization step: critic update(s) then generator update in the
        GAN phase; generator-only update otherwise."""
        if self.isTrain and self.train_phase == 'together':
            self.forward()
            self.set_requires_grad(self.netD_I, True)
            for iter_d in range(self.disc_step):
                self.optimizer_D_I.zero_grad()
                self.backward_D()
                self.optimizer_D_I.step()
            self.set_requires_grad(self.netD_I, False)
            self.optimizer_G.zero_grad()
            self.backward_G()
            self.optimizer_G.step()
        else:
            self.forward()
            self.optimizer_G.zero_grad()
            self.backward_G()
            self.optimizer_G.step()
|
import pytest
from copy import deepcopy
from string import printable
from optimization.utilities.random_values import generate_random_int, generate_random_float, choose_random_value, \
choose_random_values, shuffle, shuffled, choose_random_value_with_weights
class TestRandomFunctions:
    """
    Tests for random functions.

    TODO: It is worth considering whether a randomness test would be beneficial here:
    https://en.wikipedia.org/wiki/Randomness_tests
    """

    SCRIPT_LOCATION = "optimization.utilities.random_values"

    @pytest.mark.parametrize("min_value, max_value, samples", [(1, 10, 100), (-100, 100, 2000)])
    def test_generate_random_int__value_in_range(self, min_value, max_value, samples):
        """
        Check that 'generate_random_int' function returns an integer value in the given range.

        :param min_value: Minimal possible random value.
        :param max_value: Maximal possible random value.
        :param samples: Number of test repetitions.
        """
        for _ in range(samples):
            value = generate_random_int(min_value, max_value)
            assert isinstance(value, int) and min_value <= value <= max_value

    @pytest.mark.parametrize("min_value, max_value, samples", [(1., 10., 100), (-100., 100., 2000)])
    def test_generate_random_float__value_in_range(self, min_value, max_value, samples):
        """
        Check that 'generate_random_float' function returns a float value in the given range.

        :param min_value: Minimal possible random value.
        :param max_value: Maximal possible random value.
        :param samples: Number of test repetitions.
        """
        for _ in range(samples):
            value = generate_random_float(min_value, max_value)
            assert isinstance(value, float) and min_value <= value <= max_value

    @pytest.mark.parametrize("values_pool, samples", [
        ({"white", "red", "black", "green", "blue", "gray", "brown", "purple", "pink", "yellow"}, 100),
        (set(range(-10, 11)), 200),
    ])
    def test_choose_random_value__value_in_pool(self, values_pool, samples):
        """
        Check that 'choose_random_value' function returns a value from the given pool.

        :param values_pool: Pool of values to draw from.
        :param samples: Number of test repetitions.
        """
        for _ in range(samples):
            value = choose_random_value(values_pool)
            assert value in values_pool

    @pytest.mark.parametrize("values_pool, weights, samples", [
        (["white", "red", "black", "green", "blue", "gray", "brown", "purple", "pink", "yellow"], list(range(10)), 100),
        (range(-10, 11), [1, 2, 3] * 7, 200),
    ])
    def test_choose_random_value_with_weights__value_in_pool(self, values_pool, weights, samples):
        """
        Check that 'choose_random_value_with_weights' function returns a value from the given pool.

        :param values_pool: Pool of values to draw from.
        :param weights: Example weight values (one per pool entry).
        :param samples: Number of test repetitions.
        """
        for _ in range(samples):
            value = choose_random_value_with_weights(values_pool=values_pool, weights=weights)
            assert value in values_pool

    @pytest.mark.parametrize("values_pool, samples", [
        ({"white", "red", "black", "green", "blue", "gray", "brown", "purple", "pink", "yellow"}, 100),
        (set(range(-10, 11)), 200),
    ])
    @pytest.mark.parametrize("values_number", [2, 3, 5])
    def test_choose_random_values__values_in_pool(self, values_pool, values_number, samples):
        """
        Check that 'choose_random_values' function returns values from the given pool.

        :param values_pool: Pool of values to draw from.
        :param values_number: Number of values to be picked.
        :param samples: Number of test repetitions.
        """
        for _ in range(samples):
            values = choose_random_values(values_pool, values_number)
            assert isinstance(values, list) and len(values) == values_number \
                and all([value in values_pool for value in values])

    @pytest.mark.random
    @pytest.mark.parametrize("values", [list(range(1000)), list(printable)])
    def test_shuffle__values(self, values):
        """
        Check that 'shuffle' function changes values in place (inside the list).

        :param values: List of values to shuffle.
        """
        copy_input_values = deepcopy(values)
        shuffle(values)
        assert set(copy_input_values) == set(values) and isinstance(values, list) \
            and any([copy_input_values[i] != values[i] for i in range(len(values))])

    @pytest.mark.random
    @pytest.mark.parametrize("values", [list(range(1000)), list(printable)])
    def test_shuffled__values(self, values):
        """
        Check that 'shuffled' function returns a shuffled copy and leaves the input unchanged.

        :param values: List of values to shuffle.
        """
        copy_input_values = deepcopy(values)
        output_values = shuffled(values)
        assert copy_input_values == values, "Input were unchanged"
        assert set(values) == set(output_values) and isinstance(output_values, list) \
            and any([output_values[i] != values[i] for i in range(len(values))])
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, os, pwd, grp, signal, time
from resource_management import *
from subprocess import call
from common import *
def setup_hue():
    """Configure the Hue service on this host (Ambari service script).

    Creates pid/log directories and files, makes the packaged shell scripts
    executable, creates Hue's home directory on HDFS, renders log.conf and
    pseudo-distributed.ini from stack templates, and runs the per-module
    configuration helpers for every Hue module enabled in `params`.
    Python 2 code (octal literals like 0755); relies on the Ambari
    resource_management DSL imported with `*` at module level.
    """
    import params
    import status_params
    Logger.info("Configure Hue Service")
    # create the pid and log dir
    Directory([params.hue_log_dir, params.hue_pid_dir],
              mode=0755,
              cd_access='a',
              owner=params.hue_user,
              group=params.hue_group,
              create_parents=True
              )
    # Touch the log and pid files empty so Hue can write to them.
    File([params.hue_log_file, params.hue_server_pid_file],
         mode=0644,
         owner=params.hue_user,
         group=params.hue_group,
         content=''
         )
    ## Delete from HDP3.0 && HUE4.3.0
    #Logger.info("Creating symlinks /usr/hdp/current/hadoop-client/lib/hue-plugins-4.3.0-SNAPSHOT.jar")
    #Link("{0}/desktop/libs/hadoop/java-lib/*".format(params.hue_dir),to = "/usr/hdp/current/hadoop-client/lib")
    # Make every bundled *.sh helper executable.
    Execute('find {0} -iname "*.sh" | xargs chmod +x'.format(params.service_packagedir))
    # Create a home directory for the hue user on HDFS
    params.HdfsResource(params.hue_hdfs_home_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hue_user,
                        mode=0755,
                        recursive_chmod=True
                        )
    Logger.info(format("Creating {hue_conf_dir}/log.conf file"))
    File(format("{hue_conf_dir}/log.conf"),
         content = InlineTemplate(params.hue_log_content),
         owner = params.hue_user
         )
    Logger.info(format("Creating {hue_conf_dir}/pseudo-distributed.ini config file"))
    File(format("{hue_conf_dir}/pseudo-distributed.ini"),
         content = InlineTemplate(params.hue_pseudodistributed_content),
         owner = params.hue_user
         )
    Logger.info(format("Run the script file to add configurations"))
    # Per-module configuration, gated on the stack's enable flags.
    if params.hue_hdfs_module_enabled == 'Yes':
        add_hdfs_configuration(params.has_ranger_admin, params.security_enabled)
    if params.hue_hbase_module_enabled == 'Yes':
        add_hbase_configuration(params.has_ranger_admin, params.security_enabled)
    if params.hue_hive_module_enabled == 'Yes':
        add_hive_configuration(params.has_ranger_admin, params.security_enabled)
    if params.hue_oozie_module_enabled == 'Yes':
        add_oozie_configuration(params.has_ranger_admin, params.security_enabled)
    if params.hue_spark_module_enabled == 'Yes':
        add_spark_configuration(params.has_ranger_admin, params.security_enabled)
|
import os
import random
import string
from bs4 import BeautifulSoup
import pytest
from uaaextras.clients import UAAClient
from .integration_test import IntegrationTestClient
@pytest.fixture
def config():
    """Test configuration assembled from environment variables.

    The three service URLs are normalized to carry an ``https://`` scheme
    when the environment value does not already start with ``http``.
    """
    urls = {
        "uaa": os.environ["UAA_URL"],
        "extras": os.environ["EXTRAS_URL"],
        "idp": os.environ["IDP_URL"],
    }
    for key in urls:
        if not urls[key].startswith("http"):
            urls[key] = "https://" + urls[key]
    return {
        "urls": urls,
        "idp_name": os.environ["IDP_NAME"],
        "uaa_client": os.environ["UAA_USER"],
        "uaa_secret": os.environ["UAA_SECRET"],
    }
@pytest.fixture
def uaa(config):
    """UAA client authenticated with a client-credentials token."""
    client = UAAClient(config["urls"]["uaa"], None, verify_tls=True)
    client.token = client._get_client_token(config["uaa_client"], config["uaa_secret"])
    return client
@pytest.fixture
def user(uaa, config):
    """Create a throwaway cloud.gov user in UAA; delete it on teardown."""
    suffix = "".join(random.choices(string.ascii_lowercase, k=8))
    password_alphabet = string.ascii_lowercase + string.ascii_uppercase + string.digits
    user = {
        "name": "noreply+" + suffix + "@cloud.gov",
        "password": "".join(random.choices(password_alphabet, k=20)),
    }
    created = uaa.create_user(
        user["name"],
        "unimportant",
        "alsounimportant",
        user["name"],
        password=user["password"],
        origin="cloud.gov",
    )
    uaa.set_temporary_password(
        config["uaa_client"], config["uaa_secret"], user["name"], user["password"]
    )
    yield user
    uaa.delete_user(created["id"])
@pytest.fixture
def unauthenticated(config):
    """Integration-test client with no session established yet."""
    return IntegrationTestClient(
        config["urls"]["extras"],
        config["urls"]["idp"],
        config["urls"]["uaa"],
        config["idp_name"],
    )
@pytest.fixture
def authenticated(unauthenticated, user):
    """Client logged in as the throwaway user; the TOTP token is stashed on
    the user dict when this login was the one that set it."""
    token, first_totp_setup = unauthenticated.log_in(user["name"], user["password"])
    if first_totp_setup:
        user["token"] = token
    return unauthenticated
def get_csrf(page_text) -> str:
    """Extract the value of the hidden ``_csrf_token`` field from an HTML page."""
    soup = BeautifulSoup(page_text, features="html.parser")
    return soup.find(attrs={"name": "_csrf_token"}).attrs["value"]
@pytest.mark.parametrize("page", ["/invite", "/change-password", "/first-login"])
def test_unauthenticated_pages_redirect(unauthenticated, page, config):
    """Anonymous requests to protected pages should land on the UAA login page."""
    response = unauthenticated.get_page(page)
    assert response.status_code == 200
    assert response.url == config["urls"]["uaa"] + "/login"
# NOTE: Needs to be first test as long as we do not have a totp-reset method
def test_login_no_totp(unauthenticated, config, user):
    """A brand-new user sets up TOTP on first login and reuses it afterwards."""
    # First login establishes (and returns) the TOTP secret.
    token, totp_was_set = unauthenticated.log_in(user["name"], user["password"])
    assert totp_was_set
    # Log out so the next login starts a fresh session.
    unauthenticated.log_out()
    # Logging in again with the stored token must not trigger another setup.
    _, totp_was_set = unauthenticated.log_in(user["name"], user["password"], token)
    assert not totp_was_set
def test_reset_totp(authenticated, user):
    """Resetting TOTP logs the user out and forces a new TOTP setup on login."""
    # Fetch the form first so we have a valid CSRF token.
    page = authenticated.get_page("/reset-totp")
    assert page.status_code == 200
    token_field = get_csrf(page.text)
    # Actually reset our TOTP.
    page = authenticated.post_to_page("/reset-totp", data={"_csrf_token": token_field})
    assert page.status_code == 200
    # reset-totp is supposed to log a user out; logging in should reset our TOTP.
    token, totp_was_set = authenticated.log_in(user["name"], user["password"])
    assert totp_was_set
@pytest.mark.parametrize("page", ["/invite", "/change-password"])
def test_authenticated_pages_work(authenticated, page, config):
    """Logged-in users reach protected pages directly (no login redirect)."""
    response = authenticated.get_page(page)
    assert response.status_code == 200
    assert response.url == config["urls"]["extras"] + page
def test_change_password(authenticated, config, user):
    """Change the user's password via the UI, then restore the original one."""
    r = authenticated.get_page("/change-password")
    csrf = get_csrf(r.text)  # consistency: use the shared helper, not inline soup
    data = {
        "old_password": user["password"],
        "new_password": "a_severely_insecure_password",
        "repeat_password": "a_severely_insecure_password",
        "_csrf_token": csrf,
    }
    r = authenticated.post_to_page("/reset-password", data=data)
    assert r.status_code == 200
    # set the password back so we don't confuse the other tests
    r = authenticated.get_page("/change-password")
    csrf = get_csrf(r.text)
    data = {
        "old_password": "a_severely_insecure_password",
        "new_password": user["password"],
        "repeat_password": user["password"],
        "_csrf_token": csrf,
    }
    # Bug fix: this restore request was built but never submitted, leaving the
    # user's password changed for every subsequent test.
    r = authenticated.post_to_page("/reset-password", data=data)
    assert r.status_code == 200
@pytest.mark.skip("Not done yet")
def test_invites_happy_path(authenticated, config):
if "dev" in config["urls"]["uaa"]:
# alternate path: use dev, but expect the request to fail
# email fails, but its also the last thing to happen, so the user
# is created and their invite info can still be fetched from Redis
pytest.skip("Can't test functions that require email in dev")
r = authenticated.get_page("/invite")
csrf = get_csrf(r.text)
soup = BeautifulSoup(r.text, features="html.parser")
form = soup.find("form")
url = form.attrs["action"]
payload = {"email": "", "_csrf_token": csrf}
r = authenticated.post_to_page(url, data=payload)
soup = BeautifulSoup(r.text, features="html.parser")
# TODO: finish this.
# Happy path sketch:
# - get the invite info straight from Redis
# - redeem invite
# - log in for the first time
# other tests to run:
# - bad email
# - bad csrf
# - no csrf
|
from pyvsr53dl.vsr53dl import PyVSR53DL
from pyvsr53dl.DisplayModes import Units as Units
from pyvsr53dl.DisplayModes import Orientation as Orientation
from pyvsr53dl.logger import log
import logging
if __name__ == '__main__':
    # Smoke-test script: exercise every identification, measurement and
    # display call of a VSR53DL vacuum gauge over its serial interface.
    from pyvsr53dl.sys import dev_tty
    log.setLevel(logging.INFO)
    sensor_address = 1  # address of the gauge on the serial bus
    vacuum_sense = PyVSR53DL(dev_tty, sensor_address)
    vacuum_sense.open_communication()
    # Device identification and firmware/bootloader versions.
    vacuum_sense.get_device_type()
    vacuum_sense.get_product_name()
    vacuum_sense.get_serial_number_device()
    vacuum_sense.get_serial_number_head()
    vacuum_sense.get_response_delay()
    vacuum_sense.get_device_version()
    vacuum_sense.get_firmware_version()
    vacuum_sense.get_bootloader_version()
    # Pressure readings (combined, piezo-only, Pirani-only).
    vacuum_sense.get_measurement_range()
    vacuum_sense.get_measurement_value()
    vacuum_sense.get_measurement_value_piezo()
    vacuum_sense.get_measurement_value_pirani()
    # Display settings: set then read back.
    vacuum_sense.set_display_unit(Units.MBAR)
    vacuum_sense.get_display_unit()
    vacuum_sense.set_display_orientation(Orientation.NORMAL)
    vacuum_sense.get_display_orientation()
    # Relay states and runtime counter.
    vacuum_sense.get_relay_1_status()
    vacuum_sense.get_relay_2_status()
    vacuum_sense.get_operating_hours()
    vacuum_sense.close_communication()
# Presumably Django's legacy (pre-3.2) hook selecting this app's default
# AppConfig subclass — confirm against the project's Django version.
default_app_config = "my_wallet.wallets.apps.WalletsConfig"
|
##########################################################################
#
# CUDA code generator
#
# This routine is called by op2 which parses the input files
#
# It produces a file xxx_kernel.cu for each kernel,
# plus a master kernel file
#
##########################################################################
import re
import datetime
import os
def comm(line):
    """Append *line* to the generated source as a comment in the active
    output language (FORTRAN '!' or C++ '//'); empty input emits a blank line."""
    global file_text, FORTRAN, CPP
    global depth
    indent = ' ' * depth
    if not line:
        file_text += '\n'
    elif FORTRAN:
        file_text += '! ' + line + '\n'
    elif CPP:
        file_text += indent + '//' + line.rstrip() + '\n'
def code(text):
    """Append one line of generated code at the current indentation depth
    (an empty string yields an unindented blank line)."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    prefix = '' if text == '' else ' ' * depth
    file_text += prefix + text.rstrip() + '\n'
def FOR(i, start, finish):
    """Emit a loop header over ``[start, finish)`` and increase indentation."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    if FORTRAN:
        code('do {0} = {1}, {2}-1'.format(i, start, finish))
    elif CPP:
        code('for ( int {0}={1}; {0}<{2}; {0}++ ){{'.format(i, start, finish))
    depth += 2
def FOR_INC(i, start, finish, inc):
    """Emit a loop header over ``[start, finish)`` with step ``inc`` and
    increase indentation (FORTRAN form ignores the increment, as before)."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    if FORTRAN:
        code('do {0} = {1}, {2}-1'.format(i, start, finish))
    elif CPP:
        code('for ( int {0}={1}; {0}<{2}; {0}+={3} ){{'.format(i, start, finish, inc))
    depth += 2
def ENDFOR():
    """Close the innermost emitted loop and decrease indentation."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    depth -= 2
    if FORTRAN:
        code('enddo')
    elif CPP:
        code('}')
def IF(line):
    """Emit a conditional header with condition *line* and indent its body."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    if FORTRAN:
        code('if ({0}) then'.format(line))
    elif CPP:
        code('if ({0}) {{'.format(line))
    depth += 2
def ENDIF():
    """Close the innermost emitted conditional and decrease indentation."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    depth -= 2
    if FORTRAN:
        code('endif')
    elif CPP:
        code('}')
def op2_gen_cuda_simple_hyb(master, date, consts, kernels, sets):
  """Generate the master hybrid CPU/GPU kernel file for OP2.

  Writes ``cuda/<master>_hybkernels.cu``: under ``GPUPASS`` it aliases each
  ``op_par_loop_<name>`` to its ``_gpu`` variant and includes the CUDA
  kernels; otherwise it aliases to ``_cpu`` and includes the OpenMP kernels,
  then emits per-kernel host stubs that dispatch on ``OP_hybrid_gpu``.

  :param master: master source-file name (extension stripped when used).
  :param date: generation date (unused in this generator).
  :param consts: program-level constants (unused in this generator).
  :param kernels: kernel descriptors; each provides 'name' and 'nargs'.
  :param sets: op_sets of the program (unused in this generator).
  """
  global dims, idxs, typs, indtyps, inddims
  global FORTRAN, CPP, g_m, file_text, depth

  OP_ID = 1; OP_GBL = 2; OP_MAP = 3;
  OP_READ = 1; OP_WRITE = 2; OP_RW = 3;
  OP_INC = 4; OP_MAX = 5; OP_MIN = 6;
  accsstring = ['OP_READ','OP_WRITE','OP_RW','OP_INC','OP_MAX','OP_MIN' ]

  depth = 0
  FORTRAN = 0
  CPP = 1
  g_m = 0

  ##########################################################################
  #  output one master kernel file
  ##########################################################################
  file_text = ''
  comm('header')
  # GPU pass: alias every loop to its _gpu variant, include the CUDA kernels.
  code('#ifdef GPUPASS')
  for nk in range (0,len(kernels)):
    name = kernels[nk]['name']
    code('#define op_par_loop_'+name+' op_par_loop_'+name+'_gpu')
  code('#include "'+master.split('.')[0]+'_kernels.cu"')
  for nk in range (0,len(kernels)):
    name = kernels[nk]['name']
    code('#undef op_par_loop_'+name)
  code('#else')
  # CPU pass: alias to the _cpu variants, include the OpenMP kernels.
  for nk in range (0,len(kernels)):
    name = kernels[nk]['name']
    code('#define op_par_loop_'+name+' op_par_loop_'+name+'_cpu')
  code('#include "../openmp/'+master.split('.')[0]+'_kernels.cpp"')
  for nk in range (0,len(kernels)):
    name = kernels[nk]['name']
    code('#undef op_par_loop_'+name)
  code('')
  comm('user kernel files')

  for nk in range(0,len(kernels)):
    name = kernels[nk]['name']
    unique_args = range(1,kernels[nk]['nargs']+1)
    code('')
    # Forward declaration of the GPU variant of this loop.
    code('void op_par_loop_'+name+'_gpu(char const *name, op_set set,')
    depth += 2
    for m in unique_args:
      g_m = m - 1
      if m == unique_args[len(unique_args)-1]:
        code('op_arg arg'+str(g_m)+');')
      else:
        code('op_arg arg'+str(g_m)+',')
    depth -= 2
    code('')
    comm('GPU host stub function')
    # Hybrid stub: dispatch to _gpu or _cpu depending on OP_hybrid_gpu.
    code('#if OP_HYBRID_GPU')
    code('void op_par_loop_'+name+'(char const *name, op_set set,')
    depth += 2
    for m in unique_args:
      g_m = m - 1
      if m == unique_args[len(unique_args)-1]:
        code('op_arg arg'+str(g_m)+'){')
        code('')
      else:
        code('op_arg arg'+str(g_m)+',')
    IF('OP_hybrid_gpu')
    code('op_par_loop_'+name+'_gpu(name, set,')
    depth += 2
    for m in unique_args:
      g_m = m - 1
      if m == unique_args[len(unique_args)-1]:
        code('arg'+str(g_m)+');')
        code('')
      else:
        code('arg'+str(g_m)+',')
    depth -=2
    code('}else{')
    code('op_par_loop_'+name+'_cpu(name, set,')
    depth += 2
    for m in unique_args:
      g_m = m - 1
      if m == unique_args[len(unique_args)-1]:
        code('arg'+str(g_m)+');')
        code('')
      else:
        code('arg'+str(g_m)+',')
    depth -=2
    ENDIF()
    depth-=2
    code('}')
    code('#else')
    # Non-hybrid build: the stub forwards unconditionally to the GPU variant.
    code('void op_par_loop_'+name+'(char const *name, op_set set,')
    depth += 2
    for m in unique_args:
      g_m = m - 1
      if m == unique_args[len(unique_args)-1]:
        code('op_arg arg'+str(g_m)+'){')
        code('')
      else:
        code('op_arg arg'+str(g_m)+',')
    code('op_par_loop_'+name+'_gpu(name, set,')
    depth += 2
    for m in unique_args:
      g_m = m - 1
      if m == unique_args[len(unique_args)-1]:
        code('arg'+str(g_m)+');')
        code('')
      else:
        code('arg'+str(g_m)+',')
    depth-=2
    code('}')
    depth-=2
    code('#endif //OP_HYBRID_GPU')
  code("#endif")

  master = master.split('.')[0]
  fid = open('cuda/'+master.split('.')[0]+'_hybkernels.cu','w')
  fid.write('//\n// auto-generated by op2.py\n//\n\n')
  fid.write(file_text)
  fid.close()
|
from SBaaS_base.postgresql_orm_base import *
class data_stage01_isotopomer_averages(Base):
    """Replicate-averaged isotopomer intensities, keyed by experiment, sample
    abbreviation/type, time point, metabolite, fragment and scan type.

    Column/constraint names come from the SBaaS ORM wildcard import
    (SQLAlchemy declarative style).
    """
    __tablename__ = 'data_stage01_isotopomer_averages'
    #id = Column(Integer, Sequence('data_stage01_isotopomer_averages_id_seq'), primary_key=True)
    experiment_id = Column(String(50))
    sample_name_abbreviation = Column(String(100))
    sample_type = Column(String(100))
    time_point = Column(String(10))
    met_id = Column(String(100))
    fragment_formula = Column(String(500))
    fragment_mass = Column(Integer)
    n_replicates = Column(Integer)                 # replicates averaged
    intensity_normalized_average = Column(Float)   # mean across replicates
    intensity_normalized_cv = Column(Float)        # coefficient of variation
    intensity_normalized_units = Column(String(20))
    intensity_theoretical = Column(Float)
    abs_devFromTheoretical = Column(Float)
    scan_type = Column(String(50));
    used_ = Column(Boolean);
    comment_ = Column(Text);
    # Composite natural key in lieu of a surrogate id column.
    __table_args__ = (PrimaryKeyConstraint('experiment_id','sample_name_abbreviation','sample_type','met_id','time_point','fragment_formula','fragment_mass','scan_type'),
        #UniqueConstraint('experiment_id','sample_name_abbreviation','sample_type','met_id','time_point','fragment_formula','fragment_mass','scan_type'),
        )

    # NOTE(review): unlike the NormSum sibling class below, this constructor
    # takes no comment_ parameter, so comment_ is left unset — confirm intended.
    def __init__(self, experiment_id_I, sample_name_abbreviation_I, sample_type_I, time_point_I, met_id_I, fragment_formula_I, fragment_mass_I,
                 n_replicates_I, intensity_normalized_average_I, intensity_normalized_cv_I,
                 intensity_normalized_units_I, intensity_theoretical_I, abs_devFromTheoretical_I, scan_type_I, used_I):
        self.experiment_id = experiment_id_I;
        self.sample_name_abbreviation = sample_name_abbreviation_I;
        self.sample_type = sample_type_I;
        self.time_point = time_point_I;
        self.met_id = met_id_I;
        self.fragment_formula = fragment_formula_I;
        self.fragment_mass = fragment_mass_I;
        self.n_replicates = n_replicates_I;
        self.intensity_normalized_average = intensity_normalized_average_I;
        self.intensity_normalized_cv = intensity_normalized_cv_I;
        self.intensity_normalized_units = intensity_normalized_units_I;
        self.intensity_theoretical = intensity_theoretical_I;
        self.abs_devFromTheoretical = abs_devFromTheoretical_I;
        self.used_ = used_I;
        self.scan_type = scan_type_I;
class data_stage01_isotopomer_averagesNormSum(Base):
    """Same shape as data_stage01_isotopomer_averages but holding the
    sum-normalized averages; adds a comment_ constructor parameter and
    dict/JSON representations."""
    __tablename__ = 'data_stage01_isotopomer_averagesNormSum'
    #TODO:
    #DROP SEQUENCE "data_stage01_isotopomer_averagesNormSum_fragment_mass_seq";
    #CREATE SEQUENCE "data_stage01_isotopomer_averagesNormSum_id_seq"
    #  INCREMENT 1
    #  MINVALUE 1
    #  MAXVALUE 9223372036854775807
    #  START 1
    #  CACHE 1;
    #ALTER TABLE "data_stage01_isotopomer_averagesNormSum_id_seq"
    #  OWNER TO postgres;
    #ALTER TABLE data_stage01_isotopomer_averagesNormSum ADD COLUMN id integer;
    #ALTER TABLE data_stage01_isotopomer_averagesNormSum ALTER COLUMN id SET NOT NULL;
    #ALTER TABLE data_stage01_isotopomer_averagesNormSum
    #  ADD CONSTRAINT data_stage01_isotopomer_averagesNormSum_id_key UNIQUE(id);
    #id = Column(Integer, Sequence('data_stage01_isotopomer_averagesNormSum_id_seq'))
    experiment_id = Column(String(50))
    sample_name_abbreviation = Column(String(100))
    sample_type = Column(String(100))
    time_point = Column(String(10))
    met_id = Column(String(100))
    fragment_formula = Column(String(500))
    fragment_mass = Column(Integer)
    n_replicates = Column(Integer)
    intensity_normalized_average = Column(Float)
    intensity_normalized_cv = Column(Float)
    intensity_normalized_units = Column(String(20))
    intensity_theoretical = Column(Float)
    abs_devFromTheoretical = Column(Float)
    scan_type = Column(String(50));
    used_ = Column(Boolean);
    comment_ = Column(Text);
    # Same composite natural key as the non-normalized table.
    __table_args__ = (PrimaryKeyConstraint('experiment_id','sample_name_abbreviation','sample_type','met_id','time_point','fragment_formula','fragment_mass','scan_type'),
        #UniqueConstraint('id'),
        )

    def __init__(self, experiment_id_I, sample_name_abbreviation_I, sample_type_I, time_point_I, met_id_I, fragment_formula_I, fragment_mass_I,
                 n_replicates_I, intensity_normalized_average_I, intensity_normalized_cv_I,
                 intensity_normalized_units_I, intensity_theoretical_I, abs_devFromTheoretical_I, scan_type_I, used_I, comment_I=None):
        self.experiment_id = experiment_id_I;
        self.sample_name_abbreviation = sample_name_abbreviation_I;
        self.sample_type = sample_type_I;
        self.time_point = time_point_I;
        self.met_id = met_id_I;
        self.fragment_formula = fragment_formula_I;
        self.fragment_mass = fragment_mass_I;
        self.n_replicates = n_replicates_I;
        self.intensity_normalized_average = intensity_normalized_average_I;
        self.intensity_normalized_cv = intensity_normalized_cv_I;
        self.intensity_normalized_units = intensity_normalized_units_I;
        self.intensity_theoretical = intensity_theoretical_I;
        self.abs_devFromTheoretical = abs_devFromTheoretical_I;
        self.used_ = used_I;
        self.scan_type = scan_type_I;
        self.comment_ = comment_I;

    def __repr__dict__(self):
        # Plain-dict view of the row (n_replicates intentionally omitted,
        # matching the original implementation).
        return {
            #'id':self.id,
            'experiment_id':self.experiment_id,
            'sample_name_abbreviation':self.sample_name_abbreviation,
            'sample_type':self.sample_type,
            'time_point':self.time_point,
            'met_id':self.met_id,
            'fragment_formula':self.fragment_formula,
            'fragment_mass':self.fragment_mass,
            'intensity_normalized_average':self.intensity_normalized_average,
            'intensity_normalized_cv':self.intensity_normalized_cv,
            'intensity_normalized_units':self.intensity_normalized_units,
            'intensity_theoretical':self.intensity_theoretical,
            'abs_devFromTheoretical':self.abs_devFromTheoretical,
            'scan_type':self.scan_type,
            'used_':self.used_,
            'comment_':self.comment_}

    def __repr__json__(self):
        # JSON view of __repr__dict__.
        # NOTE(review): `json` is not imported in this file's visible imports —
        # presumably provided by the postgresql_orm_base wildcard import; confirm.
        return json.dumps(self.__repr__dict__())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.