# Given an array of unsorted numbers and a target number,
# find all unique quadruplets in it whose sum equals the target number.
# Example 1:
# Input: [4, 1, 2, -1, 1, -3], target=1
# Output: [-3, -1, 1, 4], [-3, 1, 1, 2]
# Explanation: Both the quadruplets add up to the target.
# Example 2:
# Input: [2, 0, -1, 1, -2, 2], target=2
# Output: [-2, 0, 2, 2], [-1, 0, 1, 2]
# Explanation: Both the quadruplets add up to the target.
# def getQuads(array, target):
# quads = []
# array.sort()
# for i in range(len(array) - 2):
# left = i + 1
# right = len(array) - 1
# while left<= right:
# fourthNum = target - (array[i] + array[left] + array[right] )
# # print(fourthNum)
# for j in range(left + 1, right):
# if array[j] == fourthNum:
# quads.append([array[i], array[left], array[j], array[right]])
# break
# if fourthNum < 0:
# right -= 1
# else:
# left += 1
# return quads
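# NOTE: the commented-out first attempt above fixes only one outer element and
# scans linearly for the fourth number, and its pointer updates (driven by the
# sign of fourthNum rather than a comparison with the target) can skip valid
# combinations and emit duplicates. The version below is the standard
# approach: sort, fix two elements (i, j), then close the remaining window
# with two pointers -- O(n^3) overall.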
def getQuads(array, target):
array.sort()
quads = []
for i in range(len(array) - 3):
if i > 0 and array[i] == array[i - 1]:
continue
for j in range(i+1, len(array) - 2):
if j > i+1 and array[j] == array[j - 1]:
continue
search_pair(array, i, j, j + 1, target, quads)
return quads
def search_pair(array, x, y, left, target, quads):
right = len(array) - 1
while left < right:
currentSum = array[x] + array[y] + array[left] + array[right]
if currentSum == target:
quads.append([array[x], array[y], array[left], array[right]])
left += 1
right -= 1
while left < right and array[left] == array[left - 1]:
left += 1
            while left < right and array[right] == array[right + 1]:  # compare with the value just used (right was already decremented)
right -= 1
elif currentSum < target:
left += 1
else:
right -= 1
if __name__ == '__main__':
num1 = [4, 1, 2, -1, 1, -3]
target1 = 1
x1 = getQuads(num1, target1)
print(x1)
assert x1 == [[-3, -1, 1, 4], [-3, 1, 1, 2]]
num2 = [2, 0, -1, 1, -2, 2]
target2 = 2
x2 = getQuads(num2, target2)
print(x2)
assert x2 == [[-2, 0, 2, 2], [-1, 0, 1, 2]]
num3 = [7, 6, 4, -1, 1, 2]
target3 = 16
x3 = getQuads(num3, target3)
    assert x3 == [[-1, 4, 6, 7], [1, 2, 6, 7]]
print(x3)
num4 = [1, 2, 3, 4, 5, 6, 7]
target4 = 10
x4 = getQuads(num4, target4)
    assert x4 == [[1, 2, 3, 4]]
print(x4)
|
"""Jena/Fuseki API client exceptions."""
class FusekiClientError(Exception):
"""Fuseki API client default error."""
class FusekiClientResponseError(FusekiClientError):
"""A response error from Fuseki server."""
class DatasetAlreadyExistsError(FusekiClientError):
"""Dataset already exists error."""
class DatasetNotFoundError(FusekiClientError):
"""Dataset not found error."""
class TaskNotFoundError(FusekiClientError):
"""Task not found error."""
class InvalidFileError(FusekiClientError):
"""Not a file error while uploading data."""
class EmptyDBError(FusekiClientError):
    """No result error while querying data."""
class UniquenessDBError(FusekiClientError):
    """Not unique result error while querying data."""
class ArgumentError(FusekiClientError):
    """Bad argument error while querying data."""
|
def search_schedule():
query = """
query ($page: Int) {
Page(page: $page, perPage: 50) {
pageInfo {
hasNextPage
}
media(status: RELEASING, type: ANIME, format: TV) {
nextAiringEpisode {
airingAt
}
title {
romaji
english
}
siteUrl
episodes
}
}
}
"""
return query
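# Sketch of how the query above might be executed. Its shape (Page/media with
# status RELEASING) matches the AniList GraphQL schema, so this assumes the
# public AniList endpoint; `requests` is an assumed dependency here.
#
#     import requests
#     resp = requests.post(
#         "https://graphql.anilist.co",
#         json={"query": search_schedule(), "variables": {"page": 1}},
#     )
#     media = resp.json()["data"]["Page"]["media"]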
|
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined
templates_path = os.path.dirname(os.path.realpath(__file__))
header_path = os.path.join(os.path.dirname(templates_path),
'include', 'dataframe')
src_path = os.path.join(os.path.dirname(templates_path), 'src')
dotnet_header_path = os.path.join(os.path.dirname(templates_path),
'dotnet', 'include', 'DataFrameSharp')
dotnet_src_path = os.path.join(os.path.dirname(templates_path), 'dotnet', 'src')
template_env = Environment(loader=FileSystemLoader([templates_path]),
undefined=StrictUndefined,
trim_blocks=True,
lstrip_blocks=True)
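# StrictUndefined makes rendering fail loudly when a template references a
# variable that was not passed in, instead of silently emitting nothing;
# trim_blocks/lstrip_blocks strip the newline after and the indentation before
# Jinja block tags, keeping the generated C sources tidy.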
def generate(path, **kwargs):
t = template_env.get_template(path)
return t.render(**kwargs)
def write_header(**kwargs):
filename = os.path.join(header_path,
'column' + kwargs['name'] + '.h')
t = generate('column.h', **kwargs)
with open(filename, 'wt') as f:
f.write(t)
def write_source(**kwargs):
filename = os.path.join(src_path,
'column' + kwargs['name'] + '.c')
t = generate('column.c', **kwargs)
with open(filename, 'wt') as f:
f.write(t)
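# For example, write_header(name='Int8', type='int8_t') renders the column.h
# template into <repo>/include/dataframe/columnInt8.h, and write_source
# renders column.c into <repo>/src/columnInt8.c.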
COLUMN_TYPES = [
    ('Int8', 'int8_t'), ('Int16', 'int16_t'), ('Int32', 'int32_t'),
    ('Int64', 'int64_t'), ('UInt8', 'uint8_t'), ('UInt16', 'uint16_t'),
    ('UInt32', 'uint32_t'), ('UInt64', 'uint64_t'),
    ('Float32', 'float'), ('Float64', 'double'), ('String', 'char*'),
]
for name, ctype in COLUMN_TYPES:
    write_header(name=name, type=ctype)
for name, ctype in COLUMN_TYPES:
    write_source(name=name, type=ctype)
def write_dotnet_header(**kwargs):
filename = os.path.join(dotnet_header_path,
'DataColumn' + kwargs['name'] + '.h')
t = generate('dotnet/DataColumn.h', **kwargs)
with open(filename, 'wt') as f:
f.write(t)
def write_dotnet_source(**kwargs):
filename = os.path.join(dotnet_src_path,
'DataColumn' + kwargs['name'] + '.cpp')
t = generate('dotnet/DataColumn.cpp', **kwargs)
with open(filename, 'wt') as f:
f.write(t)
for name, ctype in COLUMN_TYPES:
    write_dotnet_header(name=name, type=ctype)
for name, ctype in COLUMN_TYPES:
    write_dotnet_source(name=name, type=ctype)
|
# -*- coding: utf-8 -*-
'''
Author: Hannibal
Date:
Desc: local data config
NOTE: Don't modify this file, it's built by xml-to-python!!!
'''
partnerskill2_map = {};
partnerskill2_map[1] = {"lv":1,"reqlv":1,"gold":20,"goldrate":1,};
partnerskill2_map[2] = {"lv":2,"reqlv":2,"gold":100,"goldrate":4,};
partnerskill2_map[3] = {"lv":3,"reqlv":3,"gold":300,"goldrate":9,};
partnerskill2_map[4] = {"lv":4,"reqlv":4,"gold":600,"goldrate":16,};
partnerskill2_map[5] = {"lv":5,"reqlv":5,"gold":1000,"goldrate":25,};
partnerskill2_map[6] = {"lv":6,"reqlv":6,"gold":2000,"goldrate":36,};
partnerskill2_map[7] = {"lv":7,"reqlv":7,"gold":4000,"goldrate":49,};
partnerskill2_map[8] = {"lv":8,"reqlv":8,"gold":8000,"goldrate":64,};
partnerskill2_map[9] = {"lv":9,"reqlv":9,"gold":20000,"goldrate":81,};
partnerskill2_map[10] = {"lv":10,"reqlv":10,"gold":50000,"goldrate":100,};
partnerskill2_map[11] = {"lv":11,"reqlv":11,"gold":100000,"goldrate":121,};
partnerskill2_map[12] = {"lv":12,"reqlv":12,"gold":200000,"goldrate":144,};
partnerskill2_map[13] = {"lv":13,"reqlv":13,"gold":300000,"goldrate":169,};
partnerskill2_map[14] = {"lv":14,"reqlv":14,"gold":400000,"goldrate":196,};
partnerskill2_map[15] = {"lv":15,"reqlv":15,"gold":500000,"goldrate":225,};
partnerskill2_map[16] = {"lv":16,"reqlv":16,"gold":600000,"goldrate":256,};
partnerskill2_map[17] = {"lv":17,"reqlv":17,"gold":800000,"goldrate":289,};
partnerskill2_map[18] = {"lv":18,"reqlv":18,"gold":1600000,"goldrate":324,};
partnerskill2_map[19] = {"lv":19,"reqlv":19,"gold":3200000,"goldrate":361,};
partnerskill2_map[20] = {"lv":20,"reqlv":20,"gold":6400000,"goldrate":400,};
partnerskill2_map[21] = {"lv":21,"reqlv":21,"gold":18522000,"goldrate":441,};
partnerskill2_map[22] = {"lv":22,"reqlv":22,"gold":21296000,"goldrate":484,};
partnerskill2_map[23] = {"lv":23,"reqlv":23,"gold":24334000,"goldrate":529,};
partnerskill2_map[24] = {"lv":24,"reqlv":24,"gold":27648000,"goldrate":576,};
partnerskill2_map[25] = {"lv":25,"reqlv":25,"gold":31250000,"goldrate":625,};
partnerskill2_map[26] = {"lv":26,"reqlv":26,"gold":35152000,"goldrate":676,};
partnerskill2_map[27] = {"lv":27,"reqlv":27,"gold":39366000,"goldrate":729,};
partnerskill2_map[28] = {"lv":28,"reqlv":28,"gold":43904000,"goldrate":784,};
partnerskill2_map[29] = {"lv":29,"reqlv":29,"gold":48778000,"goldrate":841,};
partnerskill2_map[30] = {"lv":30,"reqlv":30,"gold":54000000,"goldrate":900,};
partnerskill2_map[31] = {"lv":31,"reqlv":31,"gold":59582000,"goldrate":961,};
partnerskill2_map[32] = {"lv":32,"reqlv":32,"gold":65536000,"goldrate":1024,};
partnerskill2_map[33] = {"lv":33,"reqlv":33,"gold":71874000,"goldrate":1089,};
partnerskill2_map[34] = {"lv":34,"reqlv":34,"gold":78608000,"goldrate":1156,};
partnerskill2_map[35] = {"lv":35,"reqlv":35,"gold":85750000,"goldrate":1225,};
partnerskill2_map[36] = {"lv":36,"reqlv":36,"gold":93312000,"goldrate":1296,};
partnerskill2_map[37] = {"lv":37,"reqlv":37,"gold":101306000,"goldrate":1369,};
partnerskill2_map[38] = {"lv":38,"reqlv":38,"gold":109744000,"goldrate":1444,};
partnerskill2_map[39] = {"lv":39,"reqlv":39,"gold":118638000,"goldrate":1521,};
partnerskill2_map[40] = {"lv":40,"reqlv":40,"gold":128000000,"goldrate":1600,};
partnerskill2_map[41] = {"lv":41,"reqlv":41,"gold":137842000,"goldrate":1681,};
partnerskill2_map[42] = {"lv":42,"reqlv":42,"gold":148176000,"goldrate":1764,};
partnerskill2_map[43] = {"lv":43,"reqlv":43,"gold":159014000,"goldrate":1849,};
partnerskill2_map[44] = {"lv":44,"reqlv":44,"gold":170368000,"goldrate":1936,};
partnerskill2_map[45] = {"lv":45,"reqlv":45,"gold":182250000,"goldrate":2025,};
partnerskill2_map[46] = {"lv":46,"reqlv":46,"gold":194672000,"goldrate":2116,};
partnerskill2_map[47] = {"lv":47,"reqlv":47,"gold":207646000,"goldrate":2209,};
partnerskill2_map[48] = {"lv":48,"reqlv":48,"gold":221184000,"goldrate":2304,};
partnerskill2_map[49] = {"lv":49,"reqlv":49,"gold":235298000,"goldrate":2401,};
partnerskill2_map[50] = {"lv":50,"reqlv":50,"gold":250000000,"goldrate":2500,};
partnerskill2_map[51] = {"lv":51,"reqlv":51,"gold":265302000,"goldrate":2601,};
partnerskill2_map[52] = {"lv":52,"reqlv":52,"gold":281216000,"goldrate":2704,};
partnerskill2_map[53] = {"lv":53,"reqlv":53,"gold":297754000,"goldrate":2809,};
partnerskill2_map[54] = {"lv":54,"reqlv":54,"gold":314928000,"goldrate":2916,};
partnerskill2_map[55] = {"lv":55,"reqlv":55,"gold":332750000,"goldrate":3025,};
partnerskill2_map[56] = {"lv":56,"reqlv":56,"gold":351232000,"goldrate":3136,};
partnerskill2_map[57] = {"lv":57,"reqlv":57,"gold":370386000,"goldrate":3249,};
partnerskill2_map[58] = {"lv":58,"reqlv":58,"gold":390224000,"goldrate":3364,};
partnerskill2_map[59] = {"lv":59,"reqlv":59,"gold":410758000,"goldrate":3481,};
partnerskill2_map[60] = {"lv":60,"reqlv":60,"gold":432000000,"goldrate":3600,};
partnerskill2_map[61] = {"lv":61,"reqlv":61,"gold":453962000,"goldrate":3721,};
partnerskill2_map[62] = {"lv":62,"reqlv":62,"gold":476656000,"goldrate":3844,};
partnerskill2_map[63] = {"lv":63,"reqlv":63,"gold":500094000,"goldrate":3969,};
partnerskill2_map[64] = {"lv":64,"reqlv":64,"gold":524288000,"goldrate":4096,};
partnerskill2_map[65] = {"lv":65,"reqlv":65,"gold":549250000,"goldrate":4225,};
partnerskill2_map[66] = {"lv":66,"reqlv":66,"gold":574992000,"goldrate":4356,};
partnerskill2_map[67] = {"lv":67,"reqlv":67,"gold":601526000,"goldrate":4489,};
partnerskill2_map[68] = {"lv":68,"reqlv":68,"gold":628864000,"goldrate":4624,};
partnerskill2_map[69] = {"lv":69,"reqlv":69,"gold":657018000,"goldrate":4761,};
partnerskill2_map[70] = {"lv":70,"reqlv":70,"gold":686000000,"goldrate":4900,};
partnerskill2_map[71] = {"lv":71,"reqlv":71,"gold":715822000,"goldrate":5041,};
partnerskill2_map[72] = {"lv":72,"reqlv":72,"gold":746496000,"goldrate":5184,};
partnerskill2_map[73] = {"lv":73,"reqlv":73,"gold":778034000,"goldrate":5329,};
partnerskill2_map[74] = {"lv":74,"reqlv":74,"gold":810448000,"goldrate":5476,};
partnerskill2_map[75] = {"lv":75,"reqlv":75,"gold":843750000,"goldrate":5625,};
partnerskill2_map[76] = {"lv":76,"reqlv":76,"gold":877952000,"goldrate":5776,};
partnerskill2_map[77] = {"lv":77,"reqlv":77,"gold":913066000,"goldrate":5929,};
partnerskill2_map[78] = {"lv":78,"reqlv":78,"gold":949104000,"goldrate":6084,};
partnerskill2_map[79] = {"lv":79,"reqlv":79,"gold":986078000,"goldrate":6241,};
partnerskill2_map[80] = {"lv":80,"reqlv":80,"gold":1024000000,"goldrate":6400,};
partnerskill2_map[81] = {"lv":81,"reqlv":81,"gold":1062882000,"goldrate":6561,};
partnerskill2_map[82] = {"lv":82,"reqlv":82,"gold":1102736000,"goldrate":6724,};
partnerskill2_map[83] = {"lv":83,"reqlv":83,"gold":1143574000,"goldrate":6889,};
partnerskill2_map[84] = {"lv":84,"reqlv":84,"gold":1185408000,"goldrate":7056,};
partnerskill2_map[85] = {"lv":85,"reqlv":85,"gold":1228250000,"goldrate":7225,};
partnerskill2_map[86] = {"lv":86,"reqlv":86,"gold":1272112000,"goldrate":7396,};
partnerskill2_map[87] = {"lv":87,"reqlv":87,"gold":1317006000,"goldrate":7569,};
partnerskill2_map[88] = {"lv":88,"reqlv":88,"gold":1362944000,"goldrate":7744,};
partnerskill2_map[89] = {"lv":89,"reqlv":89,"gold":1409938000,"goldrate":7921,};
partnerskill2_map[90] = {"lv":90,"reqlv":90,"gold":1458000000,"goldrate":8100,};
partnerskill2_map[91] = {"lv":91,"reqlv":91,"gold":1507142000,"goldrate":8281,};
partnerskill2_map[92] = {"lv":92,"reqlv":92,"gold":1557376000,"goldrate":8464,};
partnerskill2_map[93] = {"lv":93,"reqlv":93,"gold":1608714000,"goldrate":8649,};
partnerskill2_map[94] = {"lv":94,"reqlv":94,"gold":1661168000,"goldrate":8836,};
partnerskill2_map[95] = {"lv":95,"reqlv":95,"gold":1714750000,"goldrate":9025,};
partnerskill2_map[96] = {"lv":96,"reqlv":96,"gold":1769472000,"goldrate":9216,};
partnerskill2_map[97] = {"lv":97,"reqlv":97,"gold":1825346000,"goldrate":9409,};
partnerskill2_map[98] = {"lv":98,"reqlv":98,"gold":1882384000,"goldrate":9604,};
partnerskill2_map[99] = {"lv":99,"reqlv":99,"gold":1940598000,"goldrate":9801,};
class Partnerskill2:
def __init__(self, key):
config = partnerskill2_map.get(key);
for k, v in config.items():
setattr(self, k, v);
return
def create_Partnerskill2(key):
config = partnerskill2_map.get(key);
if not config:
return
return Partnerskill2(key)
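# The generated rows follow a simple pattern: goldrate is always lv**2
# (1, 4, 9, ..., 9801). Usage sketch -- rows are keyed by skill level, so a
# lookup reads straight from the table (key 10 carries gold=50000,
# goldrate=100):
#
#     skill = create_Partnerskill2(10)
#     if skill:
#         print(skill.gold, skill.goldrate)  # -> 50000 100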
|
from app.core import lexer, token_exprs
def parse(text):
    result = []
    result.append('Sequence(Token, token type)\n')
    tokens = lexer(text, token_exprs)
    if not isinstance(tokens, list):
        error = "Line {}: syntax error, the '{}' token cannot be recognized".format(
            tokens['line'], tokens['character'])
        print(error)
        return error
    for i, token in enumerate(tokens):
        token_table = '[{}] {}\n'.format(i, str(token))
        result.append(token_table)
        print(token_table)
    return result
|
'''Author: Brandon Trabucco, Copyright 2019
Helper functions to display and run a simple game'''
######################################
# Marker interfaces for game objects #
######################################
class Walkable(object):
pass
class Breakable(object):
pass
class Pickable(object):
pass
class Harmful(object):
pass
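# Sketch (assumption: game objects subclass these markers; `inventory` is a
# hypothetical list) of the isinstance-based dispatch these classes enable:
#
#     class Coin(Pickable):
#         pass
#
#     obj = Coin()
#     if isinstance(obj, Pickable):
#         inventory.append(obj)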
|
"""
Splitting image containing two samples
.. note:: Using these scripts on 1+GB images takes several tens of GB of RAM
Sample usage::
python split_images_two_tissues.py \
-i "/datagrid/Medical/dataset_ANHIR/images/COAD_*/scale-100pc/*_*.png" \
--nb_workers 3
Copyright (C) 2016-2019 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import argparse
import gc
import glob
import logging
import os
import sys
import time
from functools import partial
import cv2 as cv
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from birl.utilities.dataset import (
args_expand_parse_images,
find_split_objects,
load_large_image,
project_object_edge,
save_large_image,
)
from birl.utilities.experiments import get_nb_workers, iterate_mproc_map
NB_WORKERS = get_nb_workers(0.5)
#: use following image size for estimating cutting line
SCALE_SIZE = 512
#: cut image in one dimension/axis
CUT_DIMENSION = 0
def arg_parse_params():
""" parse the input parameters
:return dict: parameters
"""
# SEE: https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser()
parser.add_argument(
'--dimension', type=int, required=False, choices=[0, 1], help='cutting dimension', default=CUT_DIMENSION
)
args = args_expand_parse_images(parser, NB_WORKERS)
logging.info('ARGUMENTS: \n%r' % args)
return args
def split_image(img_path, overwrite=False, cut_dim=CUT_DIMENSION):
""" split two images in single dimension
    the input image name is assumed to contain the two object names separated by "_"
:param str img_path: path to the input / output image
    :param bool overwrite: allow overwriting existing output images
:param int cut_dim: define splitting dimension
"""
name, ext = os.path.splitext(os.path.basename(img_path))
folder = os.path.dirname(img_path)
obj_names = name.split('_')
paths_img = [os.path.join(folder, obj_name + ext) for obj_name in obj_names]
if all(os.path.isfile(p) for p in paths_img) and not overwrite:
logging.debug('existing all splits of %r', paths_img)
return
img = load_large_image(img_path)
# work with just a scaled version
scale_factor = max(1, img.shape[cut_dim] / float(SCALE_SIZE))
sc = 1. / scale_factor
order = cv.INTER_AREA if scale_factor > 1 else cv.INTER_LINEAR
img_small = 255 - cv.resize(img, None, fx=sc, fy=sc, interpolation=order)
img_edge = project_object_edge(img_small, cut_dim)
del img_small
# prepare all cut edges and scale them to original image size
splits = find_split_objects(img_edge, nb_objects=len(obj_names))
if not splits:
logging.error('no splits found for %s', img_path)
return
edges = [int(round(i * scale_factor)) for i in [0] + splits + [len(img_edge)]]
# cutting images
for i, path_img_cut in enumerate(paths_img):
if os.path.isfile(path_img_cut) and not overwrite:
logging.debug('existing "%s"', path_img_cut)
continue
if cut_dim == 0:
img_cut = img[edges[i]:edges[i + 1], ...]
elif cut_dim == 1:
img_cut = img[:, edges[i]:edges[i + 1], ...]
else:
raise ValueError('unsupported dimension: %i' % cut_dim)
save_large_image(path_img_cut, img_cut)
gc.collect()
time.sleep(1)
def main(path_images, dimension, overwrite, nb_workers):
""" main entry point
:param path_images: path to images
    :param int dimension: for 2D images it is 0 or 1
:param bool overwrite: whether overwrite existing image on output
    :param int nb_workers: number of jobs running in parallel
"""
image_paths = sorted(glob.glob(path_images))
if not image_paths:
logging.info('No images found on "%s"', path_images)
return
_wrap_split = partial(split_image, cut_dim=dimension, overwrite=overwrite)
list(iterate_mproc_map(_wrap_split, image_paths, desc='Cut image tissues', nb_workers=nb_workers))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
arg_params = arg_parse_params()
logging.info('running...')
main(**arg_params)
logging.info('DONE')
|
import functools
import glob
import gzip
import os
import sys
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.utils import parse_apps_and_model_labels
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils.functional import cached_property
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
READ_STDIN = '-'
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = (
"No database fixture specified. Please provide the path of at least "
"one fixture in the command line."
)
def add_arguments(self, parser):
parser.add_argument('args', metavar='fixture', nargs='+', help='Fixture labels.')
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to load fixtures into. Defaults to the "default" database.',
)
parser.add_argument(
'--app', dest='app_label',
help='Only look for fixtures in the specified app.',
)
parser.add_argument(
'--ignorenonexistent', '-i', action='store_true', dest='ignore',
help='Ignores entries in the serialized data for fields that do not '
'currently exist on the model.',
)
parser.add_argument(
'-e', '--exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude. Can be used multiple times.',
)
parser.add_argument(
'--format',
help='Format of serialized data when reading from stdin.',
)
def handle(self, *fixture_labels, **options):
self.ignore = options['ignore']
self.using = options['database']
self.app_label = options['app_label']
self.verbosity = options['verbosity']
self.excluded_models, self.excluded_apps = parse_apps_and_model_labels(options['exclude'])
self.format = options['format']
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
'stdin': (lambda *args: sys.stdin, None),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
# Django's test suite repeatedly tries to load initial_data fixtures
# from apps that don't have any fixtures. Because disabling constraint
        # checks can be expensive on some databases (especially MSSQL), bail
# out early if no fixtures are found.
for fixture_label in fixture_labels:
if self.find_fixtures(fixture_label):
break
else:
return
with connection.constraint_checks_disabled():
self.objs_with_deferred_fields = []
for fixture_label in fixture_labels:
self.load_label(fixture_label)
for obj in self.objs_with_deferred_fields:
obj.save_deferred_fields(using=self.using)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write("Resetting sequences\n")
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_object_count == self.loaded_object_count:
self.stdout.write(
"Installed %d object(s) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_count)
)
else:
self.stdout.write(
"Installed %d object(s) (of %d) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_object_count, self.fixture_count)
)
def load_label(self, fixture_label):
"""Load fixtures files for a given label."""
show_progress = self.verbosity >= 3
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write(
"Installing %s fixture '%s' from %s."
% (ser_fmt, fixture_name, humanize(fixture_dir))
)
objects = serializers.deserialize(
ser_fmt, fixture, using=self.using, ignorenonexistent=self.ignore,
handle_forward_references=True,
)
for obj in objects:
objects_in_fixture += 1
if (obj.object._meta.app_config in self.excluded_apps or
type(obj.object) in self.excluded_models):
continue
if router.allow_migrate_model(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
if show_progress:
self.stdout.write(
'\rProcessed %i object(s).' % loaded_objects_in_fixture,
ending=''
)
# psycopg2 raises ValueError if data contains NUL chars.
except (DatabaseError, IntegrityError, ValueError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': e,
},)
raise
if obj.deferred_fields:
self.objs_with_deferred_fields.append(obj)
if objects and show_progress:
self.stdout.write('') # add a newline after progress indicator
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@functools.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""Find fixture files for a given label."""
if fixture_label == READ_STDIN:
return [(READ_STDIN, None, READ_STDIN)]
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt]
ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures…" % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
suffixes = (
'.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts)
)
targets = {'.'.join((fixture_name, suffix)) for suffix in suffixes}
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures…" % humanize(fixture_dir))
fixture_files_in_dir = []
path = os.path.join(fixture_dir, fixture_name)
for candidate in glob.iglob(glob.escape(path) + '*'):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if not fixture_files:
raise CommandError("No fixture named '%s' found." % fixture_name)
return fixture_files
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(fixture_dirs)
dirs.append('')
dirs = [os.path.abspath(os.path.realpath(d)) for d in dirs]
return dirs
def parse_name(self, fixture_name):
"""
        Split fixture name into name, serialization format, and compression format.
"""
if fixture_name == READ_STDIN:
if not self.format:
raise CommandError('--format must be specified when reading from stdin.')
return READ_STDIN, self.format, 'stdin'
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % ('.'.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if len(self.namelist()) != 1:
raise ValueError("Zip-compressed fixtures must contain one file.")
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
def humanize(dirname):
return "'%s'" % dirname if dirname else 'absolute path'
|
from pathlib import Path
from typing import Optional
import typer
from reprexlite.formatting import Venue
from reprexlite.reprex import reprex
from reprexlite.version import __version__
app = typer.Typer()
def version_callback(version: bool):
"""Print reprexlite version to console."""
if version:
typer.echo(__version__)
raise typer.Exit()
@app.command()
def main(
infile: Optional[Path] = typer.Option(
None, "--infile", "-i", help="Read code from an input file instead via editor."
),
outfile: Optional[Path] = typer.Option(
None, "--outfile", "-o", help="Write output to file instead of printing to console."
),
venue: Venue = typer.Option(
"gh",
"--venue",
"-v",
help="Output format appropriate to the venue where you plan to share this code.",
),
advertise: Optional[bool] = typer.Option(
None,
help="Whether to include footer that credits reprexlite. "
"If unspecified, will depend on specified venue's default.",
),
session_info: Optional[bool] = typer.Option(
None,
"--session-info",
help="Whether to include details about session and installed packages.",
),
style: Optional[bool] = typer.Option(
None, "--style", help="Autoformat code with black. Requires black to be installed."
),
comment: str = typer.Option(
"#>", "--comment", help="Comment prefix to use for results returned by expressions."
),
old_results: Optional[bool] = typer.Option(
None,
"--old-results",
help=(
"Keep old results, i.e., lines that match the prefix specified by the --comment "
"option. If not using this option, then such lines are removed, meaning that an input "
"that is a reprex will be effectively regenerated."
),
),
version: Optional[bool] = typer.Option(
None,
"--version",
callback=version_callback,
is_eager=True,
help="Show reprexlite version and exit.",
),
):
"""Render reproducible examples of Python code for sharing. Your code will be executed and the
results will be embedded as comments below their associated lines.
    By default, your system's default command-line text editor will open for you to type or paste
    in your code. This editor can be changed by setting the EDITOR environment variable. You can
    instead specify an input file with the --infile / -i option.
Additional markup will be added that is appropriate to the choice of venue option. For example,
for the default `gh` venue for GitHub Flavored Markdown, the final reprex will look like:
\b
----------------------------------------
```python
arr = [1, 2, 3, 4, 5]
[x + 1 for x in arr]
#> [2, 3, 4, 5, 6]
max(arr) - min(arr)
#> 4
```
\b
<sup>Created at 2021-02-27 00:13:55 PST by [reprexlite](https://github.com/jayqi/reprexlite) v0.3.1</sup>
----------------------------------------
\b
The supported venue formats are:
\b
- gh : GitHub Flavored Markdown
- so : StackOverflow, alias for gh
- ds : Discourse, alias for gh
- html : HTML
- py : Python script
- rtf : Rich Text Format
- slack : Slack
"""
if infile:
with infile.open("r") as fp:
input = fp.read()
else:
input = typer.edit() or ""
rendered = reprex(
input,
outfile=outfile,
venue=venue.value,
advertise=advertise,
session_info=session_info if session_info else False,
style=style if style else False,
comment=comment,
old_results=old_results if old_results else False,
print_=False,
terminal=True,
)
if outfile:
typer.echo(f"Wrote reprex to {outfile}")
else:
typer.echo(str(rendered) + "\n")
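# Usage sketch (assuming this Typer app is installed as the `reprex` console
# script, as in reprexlite's packaging): run `reprex` to compose a snippet in
# your editor, or render a file for StackOverflow with:
#
#     reprex -i snippet.py -v so -o reprex.md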
|
#
# wtfdmdg.py
#
# Where The Fuck Did My Day Go, dot pie
#
# A tool to help answer that question.
#
from PyQt5 import Qt, QtGui, QtWidgets, QtCore
import pyqtgraph as pg
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
import sys
import re
import collections
import datetime
import time
import pickle
import os
import itertools
import pylab
from pathlib import Path
Task = collections.namedtuple( "Task", ( "ref", "begin", "end", "body" ) )
APPDATA_DIR = os.path.join( str( Path.home() ), ".local", "share", "wtfdmdg" )
CMAP = 'gist_rainbow'
def FILE_PATH( dt ):
return os.path.join( APPDATA_DIR, datetime.datetime.strftime( dt, "%y-%m-%d.pickle" ) )
class WtfdmdgCommandParserInterface( object ):
def getCommandLineHighlighter( self, document ):
"""
Return a QSyntaxHighlighter
"""
raise NotImplementedError
def getTagBankHighlighter( self, document ):
"""
Return a QSyntaxHighlighter
"""
raise NotImplementedError
def execute( self, session, line ):
"""
Evaluate line and execute against session
"""
raise NotImplementedError
def getTaskTags( self, body ):
"""
Return a dict mapping tag class to a list of tags, given
the body of a task.
"""
raise NotImplementedError
def encodeTask( self, task ):
"""
Return a command string that yields this task exactly
"""
raise NotImplementedError
class WtfdmdgDefaultCommandParser( WtfdmdgCommandParserInterface ):
REF = r"(?:(?P<ref>(?:\d+)|\*):)?"
BODY = r"(?:(?P<body>.+))?"
TIME = r"(?:\d+)|n"
BEGIN = r"(?:(?P<begin>" + TIME + "))?"
END = r"(?:(?P<end>" + TIME + "))?"
TAG = r"(/+)(\S+)"
TAG_REGEX = re.compile( TAG )
    LINE_REGEX = re.compile( REF + BEGIN + "-?" + END + r"\.?" + BODY )
TAGBANK_REGEX = re.compile( r"\S+" )
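    # Worked example: the command "3:0930-1015.Write report /acme //billing"
    # parses to ref="3", begin="0930", end="1015",
    # body="Write report /acme //billing"; getTaskTags() on that body yields
    # {1: ["acme"], 2: ["billing"]} -- the tag class is the number of leading
    # slashes.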
class CommandLineSyntaxHighlighter( QtGui.QSyntaxHighlighter ):
def __init__( self, parser, document ):
super( WtfdmdgDefaultCommandParser.CommandLineSyntaxHighlighter, self ).__init__( document )
self.parser = parser
def highlightBlock( self, block ):
for rng, fmt in zip( self.parser._getRanges( block ), self.parser._getFormats() ):
if rng[0] >= 0 and rng[1] >= 0:
self.setFormat( rng[0], rng[1], fmt )
class TagBankSyntaxHighlighter( QtGui.QSyntaxHighlighter ):
def __init__( self, parser, tagclass, document ):
super( WtfdmdgDefaultCommandParser.TagBankSyntaxHighlighter, self ).__init__( document )
self.parser = parser
self.tagclass = tagclass
def highlightBlock( self, block ):
app = WtfdmdgApplication.instance()
if self.tagclass is not app.getSelectedTagClass():
# Nothing to do
return
tagRanges = self.parser._getTagBankRanges( block )
tagColors = [ app.getTagColor( self.tagclass, block[ x[0]:x[1] ] ) for x in tagRanges ]
for rng, clr in zip( tagRanges, tagColors ):
if rng[0] >= 0 and rng[1] >= 0:
fmt = QtGui.QTextCharFormat()
fmt.setFontWeight( QtGui.QFont.Bold )
fmt.setForeground( QtGui.QColor( *clr ) )
self.setFormat( rng[0], rng[1], fmt )
def getCommandLineHighlighter( self, document ):
return WtfdmdgDefaultCommandParser.CommandLineSyntaxHighlighter( self, document )
def getTagBankHighlighter( self, tagclass, document ):
return WtfdmdgDefaultCommandParser.TagBankSyntaxHighlighter( self, tagclass, document )
def execute( self, tasks, line ):
app = WtfdmdgApplication.instance()
ref, begin, end, body = self._getParts( line )
begin = self._getDatetime( begin )
end = self._getDatetime( end )
if all( x is None for x in [ ref, begin, end, body ] ):
print( "NOP" )
elif ref is not None and all( x is None for x in ( begin, end, body ) ):
if ref != "*":
del tasks[ int( ref ) ]
elif ( ref is None or int( ref ) not in app.session ) and any( x is not None for x in [ begin, end, body ] ):
if body is None:
# New tasks must always have body
print( "NOP" )
else:
ref = int( ref or app.generateTaskId() )
tasks[ int( ref ) ] = Task( ref, begin, end, body )
elif ref != "*":
a, b, c, d = tasks[ int( ref ) ]
if begin is not None:
b = begin
if end is not None:
c = end
if body is not None:
d = body
tasks[ int( ref ) ] = Task( a, b, c, d )
def getTaskTags( self, body ):
tagtable = {}
for tagmatch in WtfdmdgDefaultCommandParser.TAG_REGEX.findall( body ):
tagclass = len( tagmatch[0] )
tagtext = tagmatch[1].lower()
if tagclass not in tagtable:
tagtable[ tagclass ] = []
if tagtext not in tagtable[ tagclass ]:
tagtable[ tagclass ].append( tagtext )
return tagtable
def _getParts( self, line ):
m = WtfdmdgDefaultCommandParser.LINE_REGEX.match( line )
if m is None:
return None
return [ m.group( x ) for x in [ "ref", "begin", "end", "body" ] ]
def _getRanges( self, line ):
m = WtfdmdgDefaultCommandParser.LINE_REGEX.match( line )
if m is None:
return None
return [ m.span( x ) for x in [ "ref", "begin", "end", "body" ] ]
def _getTagBankRanges( self, line ):
return [ m.span() for m in WtfdmdgDefaultCommandParser.TAGBANK_REGEX.finditer( line ) ]
def _getFormats( self ):
reff = QtGui.QTextCharFormat()
beginf = QtGui.QTextCharFormat()
endf = QtGui.QTextCharFormat()
bodyf = QtGui.QTextCharFormat()
reff.setFontWeight( QtGui.QFont.Bold )
beginf.setFontWeight( QtGui.QFont.Bold )
beginf.setForeground( QtGui.QColor( 100, 100, 255 ) )
endf.setFontWeight( QtGui.QFont.Bold )
endf.setForeground( QtGui.QColor( 200, 200, 0 ) )
bodyf.setForeground( QtGui.QColor( 255, 0, 0 ) )
bodyf.setFontWeight( QtGui.QFont.Bold )
return [ reff, beginf, endf, bodyf ]
def _getDatetime( self, string ):
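        # Accepts "n" for "now", or bare digits where the last two digits are
        # minutes once the string is longer than two characters:
        # "9" -> 09:00, "930" -> 09:30, "1730" -> 17:30.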
if string is None:
return None
if string == "n":
dt = datetime.datetime.now()
            dt = dt.replace( second=0 )  # replace() returns a new datetime
return dt
if string.isdigit():
if len( string ) <= 2:
hr = int( string )
mn = 0
else:
mn = int( string[-2:] )
hr = int( string[ :-2 ] )
dt = datetime.datetime.now()
dt = dt.replace( hour=hr, minute=mn, second=0 )
return dt
assert( False )
return None
def encodeTask( self, task ):
text = ""
if task.ref is not None:
text += str( task.ref ) + ":"
if task.begin is not None:
text += datetime.datetime.strftime( task.begin, "%H%M" )
if task.end is not None:
text += "-" + datetime.datetime.strftime( task.end, "%H%M" )
if task.body is not None:
text += "." + task.body
return text
class WtfdmdgApplication( QtWidgets.QApplication ):
def __init__( self, argv, parser=None ):
"""
Initialize application
"""
super( WtfdmdgApplication, self ).__init__( argv )
self.session = {}
self.tagtable = {}
self.selectedTask = None
self.selectedTagClass = None
if parser is None:
parser = WtfdmdgDefaultCommandParser()
self._tagColorMap = pylab.get_cmap( CMAP )
self._commandParser = parser
self.loadFile()
self._mainWindow = WtfdmdgMainWindow()
self._mainWindow._commandTextEdit.setFocus()
self.redraw()
def loadFile( self, path=None ):
"""
Load state from file.
"""
path = path or FILE_PATH( datetime.datetime.now() )
if os.path.exists( path ):
            with open( path, 'rb' ) as f:
                self.session = pickle.load( f )
self.__refreshTags()
def dumpFile( self ):
"""
Export state to file
"""
if not os.path.exists( APPDATA_DIR ):
os.makedirs( APPDATA_DIR )
        with open( FILE_PATH( datetime.datetime.now() ), 'wb' ) as f:
            pickle.dump( self.session, f )
def redraw( self ):
"""
Redraw application
"""
self._mainWindow._taskTable.redraw( self.session )
self._mainWindow._tagTable.redraw( self.tagtable )
self._mainWindow._timelineWidget.redraw()
def processLine( self, line ):
"""
Parse and process a line of input
"""
self._commandParser.execute( self.session, line )
self.__refreshTags()
self.dumpFile()
self.deselectTask()
self.redraw()
def checkTaskSelect( self, line ):
"""
Check to see if we should select a task
"""
ref, _, _, _ = self._commandParser._getParts( line )
if ref is not None:
if ref.isdigit():
self.selectTaskByRef( int( ref ) )
else:
self.deselectTask()
else:
self.deselectTask()
def getCommandLineHighlighter( self, doc ):
"""
Apply highlighting to command line document
"""
return self._commandParser.getCommandLineHighlighter( doc )
def getTagBankHighlighter( self, tagclass, doc ):
"""
Apply highlighting to tagbank document
"""
return self._commandParser.getTagBankHighlighter( tagclass, doc )
def getTagColor( self, tagclass, tag ):
"""
Get (r,g,b) color for tag in tagclass
"""
assert( tagclass in self.tagtable and tag in self.tagtable[ tagclass ] )
i = self.tagtable[ tagclass ].index( tag.lower() )
nc = len( self.tagtable[ tagclass ] )
return [ 255 * x for x in ( self._tagColorMap( float( i ) / nc )[:-1] ) ]
def getSession( self ):
"""
Return the current session
"""
return self.session
def getTags( self ):
"""
Get the tags dict
"""
return self.tagtable
def getTagsForTask( self, task ):
"""
Get tags referenced by this task
"""
if task.body is None:
return {}
return self._commandParser.getTaskTags( task.body )
def getSelectedTags( self ):
"""
Return the list of selected tags
"""
return self._mainWindow._tagTable.getSelectedTags()
def getSelectedTagClass( self ):
"""
Get the currently selected tag class
"""
return self.selectedTagClass
def generateTaskId( self ):
"""
Create a unique task id
"""
if len( self.session ) == 0:
return 0
return max( self.session.keys() ) + 1
def getTaskByIndex( self, index ):
"""
Get a task by its order in the session.
"""
return self.__getSortedTaskList()[ index ]
def getSelectedTask( self ):
"""
Return the currently selected task
"""
if self.selectedTask is not None:
return self.session[ self.selectedTask ]
return None
def getSelectedTaskIndex( self ):
"""
Get the index of the selected task
"""
if self.selectedTask is None:
return None
refs = [ x.ref for x in self.__getSortedTaskList() ]
assert( self.selectedTask in refs )
return refs.index( self.selectedTask )
def selectTaskByRef( self, ref ):
"""
Set selected task by ref ID
"""
if ref in self.session:
self.selectedTask = ref
def reverseTask( self, task ):
"""
Return command string for task
"""
return self._commandParser.encodeTask( task )
def stepTask( self, offset ):
"""
Advance task by positive/negative index count. Advancing past end
or before start clears selection. When selection is clear, moving
backward goes to last index, and moving forward goes to first index.
No selection can be thought of as a final "invisible" item.
"""
refs = [ x.ref for x in self.__getSortedTaskList() ] + [ None ]
assert( self.selectedTask in refs )
curi = refs.index( self.selectedTask )
self.selectedTask = refs[ ( curi + offset ) % len( refs ) ]
def selectNextTask( self ):
"""
Select the next task
"""
self.stepTask( 1 )
def selectPreviousTask( self ):
"""
Select the previous task
"""
self.stepTask( -1 )
def stepTagClass( self, offset ):
"""
Same as stepTask, but for the tag class.
"""
clss = sorted( list( self.tagtable.keys() ) ) + [ None ]
curi = clss.index( self.selectedTagClass )
self.selectedTagClass = clss[ ( curi + offset ) % len( clss ) ]
def selectNextTagClass( self ):
"""
Select the next tag class.
"""
self.stepTagClass( 1 )
def selectPreviousTagClass( self ):
"""
Select the previous tag class
"""
self.stepTagClass( -1 )
def deselectTask( self ):
"""
Don't select any tasks
"""
self.selectedTask = None
def __getSortedTaskList( self ):
"""
Return list of tasks, sorted by start time.
"""
tasks = self.session.values()
startedTasks = [ t for t in tasks if t.begin is not None ]
unstartedTasks = [ t for t in tasks if t.begin is None ]
return ( unstartedTasks + sorted( startedTasks, key=lambda r: r.begin ) )
def __mergeTags( self, tags ):
"""
Given a dict mapping tag class to tag list, merge it
into self.tagtable.
"""
for cls in tags:
if cls not in self.tagtable:
self.tagtable[ cls ] = []
for tag in tags[ cls ]:
if tag not in self.tagtable[ cls ]:
self.tagtable[ cls ].append( tag )
def __refreshTags( self ):
"""
Search current session for any tags.
"""
self.tagtable = {}
for task in self.session.values():
self.__mergeTags( self._commandParser.getTaskTags( task.body ) )
class WtfdmdgMainWindow( QtWidgets.QMainWindow ):
def __init__( self ):
"""
Initialize application main window
"""
super( WtfdmdgMainWindow, self ).__init__()
# Create top-level layout
topLevelLayout = QtWidgets.QVBoxLayout()
topLevelLayout.setSpacing( 0 )
topLevelLayout.setContentsMargins( 0, 0, 0, 0 )
        centralWidget = QtWidgets.QWidget()
centralWidget.setLayout( topLevelLayout )
self.setCentralWidget( centralWidget )
anotherlayout = QtWidgets.QHBoxLayout()
anotherlayout.setSpacing( 0 )
anotherlayout.setContentsMargins( 0, 0, 0, 0 )
anotherAnotherLayout = QtWidgets.QVBoxLayout()
anotherAnotherLayout.setSpacing( 0 )
anotherAnotherLayout.setContentsMargins( 0, 0, 0, 0 )
# Add the widgets
self._commandTextEdit = WtfdmdgCommandTextEdit()
self._taskTable = WtfdmdgTaskTable()
self._tagTable = WtfdmdgTagTable()
self._timelineWidget = WtfdmdgTimelineWidget()
anotherAnotherLayout.addWidget( self._tagTable )
anotherAnotherLayout.addWidget( self._taskTable )
anotherAnotherLayout.setStretch( 0, 1 )
anotherAnotherLayout.setStretch( 1, 5 )
anotherlayout.addLayout( anotherAnotherLayout )
anotherlayout.addWidget( self._timelineWidget )
anotherlayout.setStretch( 0, 2 )
anotherlayout.setStretch( 1, 1 )
topLevelLayout.addLayout( anotherlayout )
topLevelLayout.addWidget( self._commandTextEdit )
topLevelLayout.setStretch( 0, 999 )
topLevelLayout.setStretch( 1, 1 )
# Set title
self.setWindowTitle( "Where The Fuck Did My Day Go?" )
self.show()
class WtfdmdgCommandTextEdit( QtWidgets.QTextEdit ):
def __init__( self ):
"""
Initialize the command text edit.
"""
super( WtfdmdgCommandTextEdit, self ).__init__()
self.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self._hilighter = WtfdmdgApplication.instance().getCommandLineHighlighter( self.document() )
self.setFont( QtGui.QFontDatabase.systemFont( QtGui.QFontDatabase.FixedFont ) )
        self.setMinimumHeight( int( self.document().size().height() ) )
def keyPressEvent( self, event ):
"""
Capture some keys
"""
if( event.key() == QtCore.Qt.Key_Return ):
WtfdmdgApplication.instance().processLine( self.toPlainText() )
self.clear()
elif( event.key() == QtCore.Qt.Key_Down ):
if event.modifiers() == QtCore.Qt.ShiftModifier:
WtfdmdgApplication.instance().selectNextTagClass()
else:
WtfdmdgApplication.instance().selectNextTask()
task = WtfdmdgApplication.instance().getSelectedTask()
self.preloadTask( task )
elif( event.key() == QtCore.Qt.Key_Up ):
if event.modifiers() == QtCore.Qt.ShiftModifier:
WtfdmdgApplication.instance().selectPreviousTagClass()
else:
WtfdmdgApplication.instance().selectPreviousTask()
task = WtfdmdgApplication.instance().getSelectedTask()
self.preloadTask( task )
elif( event.key() == QtCore.Qt.Key_Escape ):
self.clear()
else:
super( WtfdmdgCommandTextEdit, self ).keyPressEvent( event )
WtfdmdgApplication.instance().checkTaskSelect( self.toPlainText() )
WtfdmdgApplication.instance().redraw()
def preloadTask( self, task ):
"""
Set contents to this task
"""
if task is None:
self.setText( "" )
return
text = WtfdmdgApplication.instance().reverseTask( task )
self.setText( text )
class WtfdmdgTaskTable( QtWidgets.QTableWidget ):
def __init__( self ):
"""
Initialize task table widget
"""
super( WtfdmdgTaskTable, self ).__init__()
self.setColumnCount( 4 )
self.setGridStyle( QtCore.Qt.NoPen )
self.verticalHeader().setVisible( False )
self.hv = QtWidgets.QHeaderView( QtCore.Qt.Horizontal, self )
self.setHorizontalHeader( self.hv )
self.hv.setSectionResizeMode( 0, QtWidgets.QHeaderView.ResizeToContents )
self.hv.setSectionResizeMode( 1, QtWidgets.QHeaderView.ResizeToContents )
self.hv.setSectionResizeMode( 2, QtWidgets.QHeaderView.ResizeToContents )
self.hv.setSectionResizeMode( 3, QtWidgets.QHeaderView.Stretch )
self.setHorizontalHeaderLabels( ( "Ref", "Begin", "End", "Body" ) )
self.verticalHeader().setDefaultSectionSize( 15 )
self.setEditTriggers( QtWidgets.QAbstractItemView.NoEditTriggers )
self.setSelectionMode( QtWidgets.QAbstractItemView.NoSelection )
self.redraw( {} )
def redraw( self, session ):
"""
Draw all items in session
"""
app = WtfdmdgApplication.instance()
selectedRow = app.getSelectedTaskIndex()
self.setRowCount( len( session ) )
for rowi in range( len( session ) ):
task = app.getTaskByIndex( rowi )
cols = range( 4 )
vals = [
str( task.ref ),
str( "" if task.begin is None else datetime.datetime.strftime( task.begin, "%H:%M" ) ),
str( "" if task.end is None else datetime.datetime.strftime( task.end, "%H:%M" ) ),
str( task.body ) ]
for c, v in zip( cols, vals ):
i = QtWidgets.QTableWidgetItem( v )
if rowi == selectedRow:
i.setBackground( QtGui.QBrush( QtGui.QColor( 230, 230, 230 ) ) )
if task.begin is None or task.end is None:
fnt = QtGui.QFont()
fnt.setWeight( QtGui.QFont.Bold )
i.setFont( fnt )
else:
i.setForeground( QtGui.QColor( 150, 150, 150 ) )
self.setItem( rowi, c, i )
class WtfdmdgTagTable( QtWidgets.QTableWidget ):
def __init__( self ):
"""
Initialize task table widget
"""
super( WtfdmdgTagTable, self ).__init__()
self.setColumnCount( 2 )
self.setGridStyle( QtCore.Qt.NoPen )
self.verticalHeader().setVisible( False )
self.hv = QtWidgets.QHeaderView( QtCore.Qt.Horizontal, self )
self.setHorizontalHeader( self.hv )
self.hv.setSectionResizeMode( 0, QtWidgets.QHeaderView.ResizeToContents )
self.hv.setSectionResizeMode( 1, QtWidgets.QHeaderView.Stretch )
self.setHorizontalHeaderLabels( ( "Class", "Tags" ) )
self.verticalHeader().setDefaultSectionSize( 15 )
self.setEditTriggers( QtWidgets.QAbstractItemView.NoEditTriggers )
self.setSelectionBehavior( QtWidgets.QAbstractItemView.SelectRows )
self.redraw( {} )
def getSelectedTags( self ):
"""
Return class and list of selected tags
"""
tags = WtfdmdgApplication.instance().getTags()
cls = WtfdmdgApplication.instance().getSelectedTagClass()
if cls is not None:
return tags[ cls ]
return {}
def redraw( self, tagtable ):
"""
Draw all items in session
"""
app = WtfdmdgApplication.instance()
self.setRowCount( len( tagtable ) )
for rowi, ( cls, tags ) in enumerate( sorted( tagtable.items(), key=lambda x: x[0] ) ):
ci = QtWidgets.QTableWidgetItem( str( cls ) )
self.setItem( rowi, 0, ci )
            te = QtWidgets.QTextEdit( " ".join( tags ) )  # QTextEdit lives in QtWidgets in PyQt5
te.setReadOnly( True )
te.hl = app.getTagBankHighlighter( cls, te.document() )
            te.setMinimumHeight( int( te.document().size().height() ) )
te.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
te.document().setDocumentMargin( 0 )
            te.setFrameStyle( QtWidgets.QFrame.NoFrame )
self.setCellWidget( rowi, 1, te )
class WtfdmdgTimelineWidget( pg.PlotWidget ):
class DateAxis( pg.AxisItem ):
def __init__( self, *args, **kwargs ):
super( WtfdmdgTimelineWidget.DateAxis, self ).__init__( *args, **kwargs )
fnt = WtfdmdgApplication.instance().font()
self.setStyle( tickFont=fnt )
def tickStrings( self, values, scale, spacing ):
strings = []
for v in values:
# vs is the original tick value
vs = v * scale
vstr = time.strftime( "%H:%M", time.localtime( vs ) )
strings.append( vstr )
return strings
def __init__( self ):
"""
Initialize the timeline widget
"""
ax = WtfdmdgTimelineWidget.DateAxis( orientation='left')
super( WtfdmdgTimelineWidget, self ).__init__( axisItems={'left': ax } )
self._barGraphItem = pg.BarGraphItem( x0=[], x1=[], y0=[], y1=[] )
self.addItem( self._barGraphItem )
self.getViewBox().setMouseEnabled( False, False )
self.hideAxis( 'bottom' )
self.invertY( True )
def redraw( self ):
"""
Plot everything
"""
self.clear()
tasks = [ x for x in WtfdmdgApplication.instance().getSession().values() if x.begin is not None and x.end is not None ]
tags = WtfdmdgApplication.instance().getSelectedTags()
if len( tasks ) <= 0:
return
def activeAt( t0, t1 ):
minute = datetime.timedelta( minutes=1 )
t0 = t0 + minute # Add a minute to prevent creating columns when one task ends exactly as another starts
return [ x for x in tasks if not ( t1 < ( x.begin ) or t0 > ( x.end ) ) ]
def numActiveAt( t0, t1 ):
return len( activeAt( t0, t1 ) )
importantTimes = sorted( set( x.begin for x in tasks ) | set( x.end for x in tasks ) )
s = [ numActiveAt( x, x ) for x in importantTimes ]
maxConcurrent = max( s )
width = 1.0 / maxConcurrent
columnAssignments = {}
for task in tasks:
columnAssignments[ task ] = None
for task in tasks:
conflicts = activeAt( task.begin, task.end )
for i in range( maxConcurrent ):
if i not in [ columnAssignments[ x ] for x in conflicts ]:
columnAssignments[ task ] = i
break
x0 = []
x1 = []
y0 = []
y1 = []
brushes = []
for task in sorted( tasks, key=lambda x: x.begin ):
coeff = columnAssignments[ task ]
x = coeff * width
x0.append( x )
x1.append( x + width )
y0.append( time.mktime( task.begin.timetuple() ) )
y1.append( time.mktime( task.end.timetuple() ) )
brushes.append( self._getBrush( task ) )
self._barGraphItem = pg.BarGraphItem( x0=x0, x1=x1, y0=y0, y1=y1, brushes=brushes )
self.addItem( self._barGraphItem )
def _getBrush( self, task ):
"""
Construct brush for this task
"""
app = WtfdmdgApplication.instance()
selectedTagClass = WtfdmdgApplication.instance().getSelectedTagClass()
allTags = WtfdmdgApplication.instance().getTags()
theseTags = WtfdmdgApplication.instance().getTagsForTask( task )
if selectedTagClass not in theseTags or len( theseTags[ selectedTagClass ] ) <= 0:
# This task has no tags in currently selected class, so no color
return pg.mkBrush( 200, 200, 200 )
theseTags = theseTags[ selectedTagClass ]
if len( theseTags ) == 1:
# This task has a single tag in the current class, solid color
return pg.mkBrush( *app.getTagColor( selectedTagClass, theseTags[0] ) )
else:
# This task has multiple colors in the current class, use a gradient
nc = len( theseTags )
gradient = QtGui.QLinearGradient( QtCore.QPointF( 0, 0 ), QtCore.QPointF( 1, 0 ) )
gradient.setSpread( QtGui.QGradient.RepeatSpread )
            gradient.setCoordinateMode( QtGui.QGradient.ObjectMode )
for i, t in enumerate( theseTags ):
offset = float( i ) / ( nc - 1 )
color = QtGui.QColor( *app.getTagColor( selectedTagClass, t ) )
gradient.setColorAt( offset, color )
return pg.mkBrush( QtGui.QBrush( gradient ) )
#
# DEBUG DRIVER
#
if __name__ == "__main__":
sys.exit( WtfdmdgApplication( sys.argv ).exec_() )
|
import pathlib
import shutil
import pytest
import salt.exceptions
import salt.modules.aptpkg as aptpkg
import salt.modules.cmdmod as cmd
import salt.modules.file as file
import salt.utils.files
import salt.utils.stringutils
from tests.support.mock import Mock, patch
pytestmark = [
pytest.mark.skip_if_binaries_missing("apt-cache", "grep"),
]
@pytest.fixture
def configure_loader_modules(minion_opts):
return {
aptpkg: {
"__salt__": {
"cmd.run_all": cmd.run_all,
"cmd.run": cmd.run,
"file.replace": file.replace,
"file.append": file.append,
"file.grep": file.grep,
},
"__opts__": minion_opts,
},
file: {
"__salt__": {"cmd.run_all": cmd.run_all},
"__utils__": {
"files.is_text": salt.utils.files.is_text,
"stringutils.get_diff": salt.utils.stringutils.get_diff,
},
"__opts__": minion_opts,
},
}
@pytest.fixture()
def revert_repo_file(tmp_path):
try:
repo_file = pathlib.Path("/etc") / "apt" / "sources.list"
backup = tmp_path / "repo_backup"
# make copy of repo file
shutil.copy(str(repo_file), str(backup))
yield
finally:
# revert repo file
shutil.copy(str(backup), str(repo_file))
aptpkg.refresh_db()
def get_current_repo(multiple_comps=False):
"""
Get a repo currently in sources.list
multiple_comps:
Search for a repo that contains multiple comps.
For example: main, restricted
"""
    test_repo, comps = None, None
    with salt.utils.files.fopen("/etc/apt/sources.list") as fp:
for line in fp:
if line.startswith("#"):
continue
if "ubuntu.com" in line or "debian.org" in line:
test_repo = line.strip()
comps = test_repo.split()[3:]
if multiple_comps:
if len(comps) > 1:
break
else:
break
return test_repo, comps
def test_list_repos():
"""
Test aptpkg.list_repos
"""
ret = aptpkg.list_repos()
repos = [x for x in ret if "http" in x]
for repo in repos:
check_repo = ret[repo][0]
for key in [
"comps",
"dist",
"uri",
"line",
"architectures",
"file",
"type",
]:
assert key in check_repo
assert pathlib.Path(check_repo["file"]).is_file()
assert check_repo["dist"] in check_repo["line"]
if isinstance(check_repo["comps"], list):
assert " ".join(check_repo["comps"]) in check_repo["line"]
else:
assert check_repo["comps"] in check_repo["line"]
def test_get_repos():
"""
Test aptpkg.get_repos
"""
test_repo, comps = get_current_repo()
if not test_repo:
pytest.skip("Did not detect an apt repo")
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
assert ret["file"] == "/etc/apt/sources.list"
def test_get_repos_multiple_comps():
"""
Test aptpkg.get_repos when multiple comps
exist in repo.
"""
test_repo, comps = get_current_repo(multiple_comps=True)
if not test_repo:
pytest.skip("Did not detect an ubuntu repo")
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
def test_get_repos_doesnot_exist():
"""
Test aptpkg.get_repos when passing a repo
that does not exist
"""
for test_repo in [
"doesnotexist",
"deb http://archive.ubuntu.com/ubuntu/ focal-backports compdoesnotexist",
]:
ret = aptpkg.get_repo(repo=test_repo)
assert not ret
@pytest.mark.destructive_test
def test_del_repo(revert_repo_file):
"""
Test aptpkg.del_repo when passing repo
that exists. And checking correct error
is returned when it no longer exists.
"""
test_repo, comps = get_current_repo()
ret = aptpkg.del_repo(repo=test_repo)
assert "Repo '{}' has been removed".format(test_repo)
with pytest.raises(salt.exceptions.CommandExecutionError) as exc:
ret = aptpkg.del_repo(repo=test_repo)
assert "Repo {} doesn't exist".format(test_repo) in exc.value.message
def test_expand_repo_def():
"""
Test aptpkg.expand_repo_def when the repo exists.
"""
test_repo, comps = get_current_repo()
ret = aptpkg.expand_repo_def(repo=test_repo)
for key in [
"comps",
"dist",
"uri",
"line",
"architectures",
"file",
"type",
]:
assert key in ret
assert pathlib.Path(ret["file"]).is_file()
assert ret["dist"] in ret["line"]
if isinstance(ret["comps"], list):
for comp in ret["comps"]:
assert comp in ret["line"]
else:
assert ret["comps"] in ret["line"]
@pytest.mark.destructive_test
def test_mod_repo(revert_repo_file):
"""
Test aptpkg.mod_repo when the repo exists.
"""
test_repo, comps = get_current_repo()
msg = "This is a test"
with patch.dict(aptpkg.__salt__, {"config.option": Mock()}):
ret = aptpkg.mod_repo(repo=test_repo, comments=msg)
assert sorted(ret[list(ret.keys())[0]]["comps"]) == sorted(comps)
ret = file.grep("/etc/apt/sources.list", msg)
assert "#{}".format(msg) in ret["stdout"]
@pytest.mark.destructive_test
def test_mod_repo_no_file(tmp_path, revert_repo_file):
"""
Test aptpkg.mod_repo when the file does not exist.
It should create the file.
"""
test_repo, comps = get_current_repo()
test_file = str(tmp_path / "test_repo")
with patch.dict(aptpkg.__salt__, {"config.option": Mock()}):
ret = aptpkg.mod_repo(repo=test_repo, file=test_file)
with salt.utils.files.fopen(test_file, "r") as fp:
ret = fp.read()
assert test_repo.split()[1] in ret.strip()
for comp in comps:
assert comp in ret
|
import logging
from . import EncoderSelfAttentionSimultaneousNMT
logger = logging.getLogger('pysimt')
"""This is the training-time wait-k model from:
Ma et al. (2018), STACL: Simultaneous Translation with Implicit Anticipation
and Controllable Latency using Prefix-to-Prefix Framework, arXiv:1810.08398
The only required parameter is the `k` argument for training. When decoding,
pass the `k` argument explicitly to `pysimt translate`. A large enough `k`
should produce the same results as the `snmt.py` model.
"""
class EncoderSelfAttentionSimultaneousWaitKNMT(EncoderSelfAttentionSimultaneousNMT):
def set_defaults(self):
super().set_defaults()
self.defaults.update({
# Decoding/training simultaneous NMT args
'translator_type': 'wk', # This model implements train-time wait-k
'translator_args': {'k': 1e4}, # k as in wait-k in training
'consecutive_warmup': 0, # consecutive training for this many epochs
})
def __init__(self, opts):
super().__init__(opts)
assert self.opts.model['translator_type'] != 'bs', \
'Beam search not compatible with simultaneous models'
def forward(self, batch, **kwargs):
"""Training forward-pass with explicit timestep-based loop."""
loss = 0.0
k = int(self.opts.model['translator_args']['k'])
if self.training:
epoch_count = kwargs['ectr']
if epoch_count <= self.opts.model['consecutive_warmup']:
# warming up, use full contexts
k = int(1e4)
# Cache encoder states first
self.cache_enc_states(batch)
# Initial state is None i.e. 0.
h = self.dec.f_init()
# Convert target token indices to embeddings -> T*B*E
y = batch[self.tl]
y_emb = self.dec.emb(y)
# -1: So that we skip the timestep where input is <eos>
for t in range(y_emb.size(0) - 1):
###########################################
# waitk: pass partial context incrementally
###########################################
state_dict = self.get_enc_state_dict(up_to=k + t)
log_p, h = self.dec.f_next(state_dict, y_emb[t], h)
loss += self.dec.nll_loss(log_p, y[t + 1])
return {
'loss': loss,
'n_items': y[1:].nonzero(as_tuple=False).size(0),
}
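# Illustrative sketch (not part of the model): under wait-k, the decoder at
# step t may read at most k + t encoder states, i.e. min(src_len, k + t).
# e.g. _waitk_visible_context(src_len=6, tgt_len=8, k=2) -> [2, 3, 4, 5, 6, 6, 6, 6]
def _waitk_visible_context(src_len, tgt_len, k):
    return [min(src_len, k + t) for t in range(tgt_len)]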
|
from datetime import datetime
from decimal import Decimal
from typing import List
from typing import Optional
from django.db import transaction
from django.db.models import Q
from base_site.mainapp.models import FamilyMember
from base_site.mainapp.models import Records
from base_site.nubank.models import NubankBankStatement
from base_site.nubank.models import NubankCards
from base_site.nubank.models import NubankItemSetup
from base_site.nubank.models import NubankStatement
from dateutil.relativedelta import relativedelta
from slugify import slugify
def process_nubank_statements():
statements = NubankStatement.objects.filter(is_processed=False).all()
for s in statements:
with transaction.atomic():
split_statements_and_create_records(s)
def split_statements_and_create_records(s: NubankStatement):
setup = get_setup(s.description)
if setup:
_update_statement(s)
values_and_date = get_values_and_dates(s)
for item in values_and_date:
create_records(item["value"], s.description, s.item_time, item["date"], setup)
def _update_statement(s: NubankStatement) -> None:
s.is_processed = True
s.save()
def get_setup(
description: str, name: Optional[FamilyMember] = None, value: Optional[int] = None
) -> Optional[NubankItemSetup]:
slug_description = slugify(description, replacements=[["*", ""]])
qs_setup = NubankItemSetup.objects.filter(description_slug=slug_description).filter(
Q(check_name=name) | Q(check_name__isnull=True)
)
if not value:
return qs_setup.first()
setup = qs_setup.filter(check_value__gt=value, check_value_operator="<").first()
if setup:
return setup
setup = qs_setup.filter(check_value__lt=value, check_value_operator=">").first()
if setup:
return setup
setup = qs_setup.filter(check_value=value, check_value_operator="=").first()
if setup:
return setup
return None
def get_values_and_dates(s: NubankStatement) -> List:
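    # Installment purchases carry a "charges" block: one record per installment,
    # each dated a month apart and valued at amount/100 (cents to currency).
    # One-off purchases yield a single record.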
registers = []
first_date = calculate_record_date(s.item_time)
if s.details.get("charges"):
count = s.details.get("charges").get("count")
amount = s.details.get("charges").get("amount")
for n in range(count):
next_date = first_date + relativedelta(months=n)
registers.append({"date": next_date, "value": Decimal(amount) / 100})
else:
registers.append({"date": first_date, "value": s.amount})
return registers
def create_records(debit: Decimal, description: str, item_date_create, item_date_executed, setup: NubankItemSetup):
if setup:
Records.objects.create(
create_date_time=item_date_create,
payment_date_time=item_date_executed,
debit=debit,
category=setup.category,
name=setup.name,
type_entry=setup.type_entry,
description=description,
)
def calculate_record_date(item_date):
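    # Statements dated on or after the 8th are booked on the 15th of the
    # following month; earlier ones on the 15th of the current month.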
if item_date.day >= 8:
next_month = item_date + relativedelta(months=1)
return datetime.strptime(f"15/{next_month.month}/{next_month.year}", "%d/%m/%Y")
else:
return datetime.strptime(f"15/{item_date.month}/{item_date.year}", "%d/%m/%Y")
def process_nubank_bank_statements():
statements = NubankBankStatement.objects.filter(is_processed=False).all()
for s in statements:
        card = NubankCards.objects.filter(cpf=s.cpf).first()
        if card is None:
            # No card registered for this CPF; skip rather than fail on card.name
            continue
        setup = get_setup(s.title, card.name, s.amount)
if not (s.is_credit() or s.is_debit()):
continue
if setup:
with transaction.atomic():
Records.objects.create(
create_date_time=s.post_date,
payment_date_time=s.post_date,
debit=s.amount if s.is_debit() else None,
credit=s.amount if s.is_credit() else None,
category=setup.category,
name=setup.name,
type_entry=setup.type_entry,
description=s.title,
)
s.is_processed = True
s.save()
|
pkgname = "python-charset-normalizer"
pkgver = "2.0.7"
pkgrel = 0
build_style = "python_module"
hostmakedepends = ["python-setuptools"]
checkdepends = ["python-pytest"]
depends = ["python"]
pkgdesc = "Encoding and language detection"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://charset-normalizer.readthedocs.io"
source = f"https://github.com/Ousret/charset_normalizer/archive/refs/tags/{pkgver}.tar.gz"
sha256 = "6473e80f73f5918254953073798a367f120cc5717e70c917359e155901c0e2d0"
# check disabled: needs an unpackaged dependency of pytest
options = ["!check"]
def post_install(self):
self.install_license("LICENSE")
|
#
# Author : Marcos Teixeira
# SkyNet is watching you
#
# common imports
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score
import matplotlib.pyplot as plt
import lightgbm as lgb
def linear_regression_experiment(xtrain, xtest, ytrain, ytest):
    # baseline approach : Logistic Regression using all variables
from sklearn.linear_model import LogisticRegression
# building the model
model = LogisticRegression()
model.fit(xtrain, ytrain)
preds = model.predict(xtest)
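    # NOTE: scikit-learn's metric functions expect (y_true, y_pred); these calls
    # pass (preds, ytest), so the "precision" and "recall" printed below are
    # effectively swapped. The same argument order is used in the other
    # experiments in this file.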
accuracy = accuracy_score(preds,ytest)
recall = recall_score(preds,ytest)
precision = precision_score(preds,ytest)
f1 = f1_score(preds,ytest)
print("accuracy : {}".format(accuracy))
print("recall : {}".format(recall))
print("precision : {}".format(precision))
print("f1 score : {}".format(f1))
# accuracy : 0.9994666666666666
# recall : 1.0
# precision : 0.68
# f1 score : 0.8095238095238095
def lightGBM_experiment(xtrain, xtest, ytrain, ytest, columns, plot_importance=False):
# parameters for LightGBMClassifier
params = {
'objective' :'multiclass',
'learning_rate' : 0.02,
'num_leaves' : 31,
'is_unbalance': 'true',
"max_depth": -1,
"num_class": 2,
'feature_fraction': 0.5,
'bagging_fraction': 0.5,
'boosting_type' : 'gbdt',
'verbosity': 1
}
lgtrain = lgb.Dataset(xtrain,ytrain)
clf = lgb.train(params, lgtrain, 300,feature_name=list(columns))
preds = clf.predict(xtest)
preds = np.argmax(preds, axis=1)
accuracy = accuracy_score(preds,ytest)
recall = recall_score(preds,ytest)
precision = precision_score(preds,ytest)
f1 = f1_score(preds,ytest)
print("accuracy : {}".format(accuracy))
print("recall : {}".format(recall))
print("precision : {}".format(precision))
print("f1 score : {}".format(f1))
# accuracy : 0.9996666666666667
# recall : 0.9545454545454546
# precision : 0.84
# f1 score : 0.8936170212765958
if plot_importance:
ax = lgb.plot_importance(clf)
ax.plot()
plt.show()
def NN_experiment(xtrain, xtest, ytrain, ytest, plot_importance=True):
    # neural-network approach : MLP classifier using all variables
from sklearn.neural_network import MLPClassifier
# building the model
    model = MLPClassifier(hidden_layer_sizes=(200,))  # other hyperparameters at scikit-learn defaults
model.fit(xtrain, ytrain)
preds = model.predict(xtest)
accuracy = accuracy_score(preds,ytest)
recall = recall_score(preds,ytest)
precision = precision_score(preds,ytest)
f1 = f1_score(preds,ytest)
print("accuracy : {}".format(accuracy))
print("recall : {}".format(recall))
print("precision : {}".format(precision))
print("f1 score : {}".format(f1))
# accuracy : 0.9996333333333334
# recall : 0.9333333333333333
# precision : 0.84
# f1 score : 0.8842105263157894
# paths
DATASITH_PATH='/Users/marcostexeira/Downloads/DESAFIO_CREDITO/'
DATASITH_FILE='desafio_fraude.csv'
def load_fraud_data(data_path,file):
csv_path = os.path.join(data_path, file)
return pd.read_csv(csv_path)
# loading data
dataset = load_fraud_data(DATASITH_PATH,DATASITH_FILE)
np_dataset = dataset.values
# data split
xtrain, xtest, ytrain, ytest = train_test_split(np_dataset[:, :-1],np_dataset[:, -1],test_size=0.2, random_state=42)
ytrain = ytrain.astype(int)
ytest = ytest.astype(int)
lightGBM_experiment(xtrain, xtest, ytrain, ytest, dataset.columns[:-1].values,True)
|
# %% Packages
import os
from typing import Dict
import mlflow
from keras.engine.functional import Functional
from mlflow.models.signature import ModelSignature
from src.utils.logging import get_logger
# %% Logger
logger = get_logger()
# %% Class
class MLFlowGateway:
def __init__(self, name: str) -> None:
self.name = name
self.mlflow_client = mlflow.tracking.MlflowClient()
self.experiment_id = self.get_experiment()
def get_experiment(self) -> str:
"""This method loads the experiment. Either there is an experiment already
in existence with a certain name, or we have to create the experiment with
the provided name. We then extract the experiment id with which we can
create runs for that experiment.
:return: Returning the experiment Id
:rtype: str
"""
        experiment = self.mlflow_client.get_experiment_by_name(self.name)
        if experiment is None:
            experiment_id = self.mlflow_client.create_experiment(self.name)
        else:
            experiment_id = experiment.experiment_id
        return experiment_id
def log_run(
self,
model: Functional,
signature: ModelSignature,
figure_path: str,
metrics_dict: Dict[str, float],
params_dict: Dict[str, float],
) -> None:
"""This method logs the model, metrics and parameters for every run.
Furthermore, the model signature is saved with the model. The run is triggered
under the specific experiment id.
:param model: The CNN which was trained priorly
:type model: Functional
:param signature: The model signature which describes the necessary input
and output
:type signature: ModelSignature
:param figure_path: The path from which we are saving the figures from
:type figure_path: str
:param metrics_dict: Dictionary containing model metrics
:type metrics_dict: Dict
:param params_dict: Dictionary containing model parameters
:type params_dict: Dict
"""
logger.info(f"Creating a new run under the experiment id {self.experiment_id}")
with mlflow.start_run(experiment_id=self.experiment_id) as run:
mlflow.log_params(params_dict)
mlflow.log_metrics(metrics_dict)
mlflow.log_artifacts(figure_path, artifact_path="figures")
mlflow.keras.log_model(
keras_model=model,
artifact_path="model",
signature=signature,
)
def get_run(self, criterion: str, prediction_type: str) -> Functional:
"""This method retrieves a past run, using the criterion to find out which
run to use. Afterwards we retrieve the run_id with which we then can load
the stored keras model.
:param criterion: Criterion by which we would like to retrieve the model
:type criterion: str
:param prediction_type: Only used for the logs
:type prediction_type: str
:raises ValueError: For the cases where we cannot retrieve any run
:return: The previously trained keras model
:rtype: Functional
"""
logger.info(f"Loading the {criterion} {prediction_type} model for {self.name}")
if criterion == "latest":
order_criterion = "attribute.start_time DESC"
else:
order_criterion = f"metrics.{criterion} DESC"
try:
run = self.mlflow_client.search_runs(
                experiment_ids=[self.experiment_id],
order_by=[order_criterion],
max_results=1,
).pop()
run_id = run.info.run_id
except Exception as e:
raise ValueError(f"The MLFlow does not contain any runs - {e}")
model_path = os.path.join("runs:", run_id, "model")
return mlflow.keras.load_model(model_path)
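# Minimal usage sketch (hypothetical names; `model` and `signature` would come
# from a prior Keras training step):
#
#   gateway = MLFlowGateway(name="cnn-classifier")
#   gateway.log_run(model=model, signature=signature, figure_path="figures",
#                   metrics_dict={"accuracy": 0.93}, params_dict={"lr": 1e-3})
#   best_model = gateway.get_run(criterion="accuracy", prediction_type="test")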
|
#!/usr/bin/env python3
macaddress = "00:15:82:92:BF:50"
width = 384
|
#
# Collective Knowledge (generate AE table)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# generate table with AE members per year
def generate(i):
"""
Input: {
              (output_text_file) - if !='', generate text file for a given conference
(conf_id) - record names for this conf
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import copy
s=''
tags_desc=i.get('tags_desc','')
otf=i.get('output_text_file','')
conf_id=i.get('conf_id','')
tt=''
s+='<table border="0" cellpadding="3" cellspacing="0" class="ck_margin_40px">\n\n'
s+=' <tr><td><b>Name:</b></td> <td><b>Organization:</b></td>'
x=''
y='style="background-color:#efefef"'
highlight1=False
for q in tags_desc:
if highlight1:
x1=x
highlight1=False
else:
x1=y
highlight1=True
tg=q['id']
name=q['name']
s+='<td '+x1+' align="center"><b>'+name+'</b></td>'
s+='</tr>\n'
aec={}
aecx={}
selected_aec={}
selected_aecx={}
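    # aec maps "Surname Firstname" sort keys to each person's tags and org;
    # aecx maps those keys back to the original display name.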
for q in tags_desc:
tg=q['id']
name=q['name']
# List all people
ii={'action':'search',
'module_uoa':cfg['module_deps']['person'],
'tags':tg,
'add_meta':'yes'
}
r=ck.access(ii)
if r['return']>0: return r
lst=r['lst']
        for entry in lst:
            d=entry.get('meta',{})
n=d.get('name','')
o=d.get('organization','')
if n!='':
# Sort by surname
n1=''
n2=n
j=n.rfind(' ')
if j>0:
n1=n[:j].strip()
n2=n[j+1:].strip()
ny=n2+' '+n1
aecx[ny]=n
if ny not in aec:
aec[ny]={'tags':[]}
aec[ny]['tags'].append(tg)
aec[ny]['org']=o
if conf_id==tg:
selected_aec[ny]=copy.deepcopy(aec[ny])
selected_aecx[ny]=copy.deepcopy(aecx[ny])
highlight=True
for q in sorted(aec):
n=aecx[q]
t=aec[q]['tags']
o=aec[q]['org']
if highlight:
x='style="background-color:#dfcfff"'
y='style="background-color:#cf9fff"'
highlight=False
else:
x=''
y='style="background-color:#efefef"'
highlight=True
s+=' <tr '+x+'><td>'+n+'</td><td>'+o+'</td>'
highlight1=False
        for col in tags_desc:
            if highlight1:
                x1=x
                highlight1=False
            else:
                x1=y
                highlight1=True
            tg=col['id']
sx=''
if tg in t: sx='<b>*</b>'
s+='<td '+x1+' align="center">'+sx+'</td>'
s+='</tr>\n'
s+='\n'
s+='</table>\n'
ck.out(s)
if otf!='' and len(selected_aec)>0:
j=0
for q in sorted(selected_aec):
j+=1
org=selected_aec[q]['org']
org=org.replace(' (',', ').replace(')','')
tt+=str(j)+') '+selected_aecx[q]+' ('+org+')\n'
r=ck.save_text_file({'text_file':otf, 'string':tt})
if r['return']>0: return r
return {'return':0}
|
from __future__ import print_function
import torch
import codecs
import re
import datetime
import matplotlib
import numpy as np
import torch.nn.functional as F
import os
import pickle
from torch.autograd import Variable
from annoy import AnnoyIndex
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from matplotlib import pyplot
#DEVICE = torch.device("cuda:0")
FILES = ['data/queries.dev.tsv']
#FILES = ['data/collection.tsv']
#FILES = ['data/queries.dev.tsv','data/queries.eval.tsv','data/queries.train.tsv','data/collection.tsv']
DEVICE = torch.device("cpu")
EMBEDDING_DIMENSION = 50
EPOCHS = 2
MB_SIZE = 5000
VOCAB_SIZE = 100000
learning_rate = 1e-2
WINDOW = 5
regex_drop_char = re.compile(r'[^a-z0-9\s]+')
regex_multi_space = re.compile(r'\s+')
class MSMARCO(Dataset):
def __init__(self, datapath):
self.data = pickle.load(open(datapath, 'rb'))
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
center, context = self.data[idx]
return center, context
def print_message(s):
print("[{}] {}".format(datetime.datetime.utcnow().strftime("%b %d, %H:%M:%S"), s), flush=True)
def get_input_layer(word_idx):
x = torch.zeros(VOCAB_SIZE).float()
x[word_idx] = 1.0
return x
def generate_vocabulary():
print_message('Converting MSMARCO files to corpus and building vocab')
word_count = {}
word_count['<UNK>'] = 1
corpus_length = 0
for a_file in FILES:
print_message("Loading file {}".format(a_file))
with codecs.open(a_file,'r', encoding='utf-8') as f:
for l in f:
l = l.strip().split('\t')
if len(l) > 1:
l = l[1]
else:
l = l[0]
for word in regex_multi_space.sub(' ', regex_drop_char.sub(' ', l.lower())).strip().split():
if word not in word_count:
word_count[word] = 0
word_count[word] += 1
corpus_length += 1
    print_message('Done reading vocab. There are {} unique words and the corpus is {} words long'.format(len(word_count), corpus_length))
return word_count
def skipgram(sentence, word2idx):
#create skipgram pairs given a sentence and a desired vocab
idx_pairs = []
sentence_length = len(sentence)
for i in range(0,sentence_length):
center_word = sentence[i]
if center_word != '<UNK>':
center_idx = word2idx[center_word]
for j in range(1,WINDOW):
if i+j < sentence_length:
if sentence[i+j] != '<UNK>':
idx_pairs.append((center_idx, word2idx[sentence[i+j]]))
if i-j >= 0 and i-j != i:
if sentence[i-j] != '<UNK>':
idx_pairs.append((center_idx, word2idx[sentence[i-j]]))
return idx_pairs
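# Example (illustrative): with WINDOW=5,
#   skipgram(['the', 'cat', 'sat'], {'the': 0, 'cat': 1, 'sat': 2})
# returns [(0, 1), (0, 2), (1, 2), (1, 0), (2, 1), (2, 0)].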
def make_pairs(word_count):
    # Given a word frequency dict, keep the VOCAB_SIZE most common words and
    # generate all skipgram pairs (center word, context word within +-WINDOW).
idx2word = sorted(word_count, key=word_count.get, reverse=True)[:VOCAB_SIZE]
word2idx = {idx2word[idx]: idx for idx, _ in enumerate(idx2word)}
    vocab = set(word2idx)
pickle.dump(word_count, open('data/wc.txt', 'wb'))
pickle.dump(vocab, open('data/vocab.txt', 'wb'))
pickle.dump(idx2word, open('data/idx2word.txt', 'wb'))
pickle.dump(word2idx, open('data/word2idx.txt', 'wb'))
print_message("Creating Train file")
pairs = []
count = 0
with open('data/corpus.txt','w', encoding='utf-8') as corpus:
for a_file in FILES:
print_message("Loading file {}".format(a_file))
with codecs.open(a_file,'r', encoding='utf-8') as f:
for l in f:
if count % 100000 == 0:
print_message("Processed {} lines so far".format(count))
count += 1
l = l.strip().split('\t')
if len(l) > 1:
l = l[1]
else:
l = l[0]
cleaned_sentence = create_sentence(vocab,l)
corpus.write(cleaned_sentence+ '\n')
pairs += skipgram(cleaned_sentence.split(),word2idx)
pickle.dump(pairs, open('data/pairs.txt','wb'))
print_message('Done Processing')
def create_sentence(vocab,sentence):
    #This takes in a vocab and a sentence and replaces any word not in vocab with UNK. Note this is suboptimal and slow.
output = ''
for word in sentence.split():
if word in vocab:
output += word
else:
output += '<UNK>'
output += ' '
return output[:-1]
def annoyGlove():
print_message("Starting Annoy Glove")
i = 0
word2idx={}
idx2word = {}
idx2vec = []
with open('data/glove.6B.50d.txt','rb') as f:
for l in f:
l = l.strip().split()
word2idx[l[0]] = i
idx2word[i] = l[0]
idx2vec.append(np.array(l[1:],dtype=float))
i += 1
idx2vec = np.array(idx2vec)
t = AnnoyIndex(EMBEDDING_DIMENSION)
for i in range(0,400000):
t.add_item(i,idx2vec[i])
t.build(100)
t.save('data/glove6b50d.ann')
x = np.zeros(EMBEDDING_DIMENSION)
    x = idx2vec[word2idx[b'the']] + idx2vec[word2idx[b'fall']] + idx2vec[word2idx[b'of']] + idx2vec[word2idx[b'the']] + idx2vec[word2idx[b'roman']] + idx2vec[word2idx[b'empire']]
neighbors = t.get_nns_by_vector(x,5, include_distances=True)
for i in range(1,5):
print_message("Closest item to 'the fall of the roman empire' is {} with {} distance".format(idx2word[neighbors[0][i]], neighbors[1][i]))
analogy = idx2vec[word2idx[b'king']]-idx2vec[word2idx[b'man']]+idx2vec[word2idx[b'woman']]
neighbors = t.get_nns_by_vector(analogy,5,include_distances=True)
for i in range(1,5):
print_message("Closest item to 'king-man+woman' is {} with {} distance".format(idx2word[neighbors[0][i]], neighbors[1][i]))
search_index = word2idx[b'cat']
neighbors = t.get_nns_by_item(search_index,5, include_distances=True)
for i in range(1,5):
print_message("Closest item to {} is {} with {} distance".format(idx2word[search_index], idx2word[neighbors[0][i]], neighbors[1][i] ))
search_index = word2idx[b'war']
neighbors = t.get_nns_by_item(search_index,5, include_distances=True)
for i in range(1,5):
print_message("Closest item to {} is {} with {} distance".format(idx2word[search_index], idx2word[neighbors[0][i]], neighbors[1][i] ))
print_message("war is {} far away from cat".format(t.get_distance(search_index,word2idx[b'cat'])))
print_message("war is {} far away from exemplification".format(t.get_distance(search_index,word2idx[b'exemplification'])))
print_message("war is {} far away from battle".format(t.get_distance(search_index,word2idx[b'battle'])))
def annoyMSMARCO():
print_message("Starting Annoy MSMARCO")
word2idx = pickle.load(open('data/word2idx.txt', 'rb'))
idx2word = pickle.load(open('data/idx2word.txt', 'rb'))
idx2vec = pickle.load(open('data/idx2vec.txt', 'rb'))
t = AnnoyIndex(EMBEDDING_DIMENSION)
    for i in range(0, VOCAB_SIZE):
t.add_item(i,idx2vec[i])
t.build(100)
#t.save('MSMARCO.ann')
#t.load('MSMARCO.ann')
search_index = word2idx['cat']
neighbors = t.get_nns_by_item(search_index,5, include_distances=True)
for i in range(1,5):
print_message("Closest item to {} is {} with {} distance".format(idx2word[search_index], idx2word[neighbors[0][i]], neighbors[1][i] ))
search_index = word2idx['war']
neighbors = t.get_nns_by_item(search_index,5, include_distances=True)
for i in range(1,5):
print_message("Closest item to {} is {} with {} distance".format(idx2word[search_index], idx2word[neighbors[0][i]], neighbors[1][i] ))
print_message("war is {} far away from cat".format(t.get_distance(search_index,word2idx['cat'])))
print_message("war is {} far away from exemplification".format(t.get_distance(search_index,word2idx['exemplification'])))
print_message("war is {} far away from battle".format(t.get_distance(search_index,word2idx['battle'])))
    sentence = 'the fall of the roman empire'
x = np.zeros(EMBEDDING_DIMENSION)
for word in sentence.split():
if word in word2idx:
x += idx2vec[word2idx[word]]
neighbors = t.get_nns_by_vector(x,5, include_distances=True)
for i in range(1,5):
print_message("Closest item to 'the fall of the roman empire' is {} with {} distance".format(idx2word[neighbors[0][i]], neighbors[1][i]))
analogy = idx2vec[word2idx['king']]-idx2vec[word2idx['man']]+idx2vec[word2idx['woman']]
    neighbors = t.get_nns_by_vector(analogy,5,include_distances=True)
for i in range(1,5):
print_message("Closest item to 'king-man+woman' is {} with {} distance".format(idx2word[neighbors[0][i]], neighbors[1][i]))
def plotGlove():
print_message("Modeling in TSNE GLOVE")
word2idx={}
idx2word = {}
idx2vec = []
i = 0
with open('data/glove.6B.50d.txt','rb') as f:
for l in f:
l = l.strip().split()
word2idx[l[0]] = i
idx2word[i] = l[0]
idx2vec.append(np.array(l[1:],dtype=float))
i += 1
idx2vec = np.array(idx2vec)
model = TSNE(n_components=2, perplexity=30, init='pca', method='exact', n_iter=5000)
X = idx2vec[:1000]
X = model.fit_transform(X)
pyplot.figure(figsize=(50,50))
for i in range(len(X)):
pyplot.text(X[i, 0], X[i, 1], idx2word[i], bbox=dict(facecolor='blue', alpha=0.1))
pyplot.xlim((np.min(X[:, 0]), np.max(X[:, 0])))
pyplot.ylim((np.min(X[:, 1]), np.max(X[:, 1])))
pyplot.savefig('TSNEGlove.png')
print_message("Image Saved")
def plotMSMARCO():
print_message("Modeling in TSNE MSMARCO")
wc = pickle.load(open('data/wc.txt', 'rb'))
word2idx = pickle.load(open('data/word2idx.txt', 'rb'))
idx2vec = pickle.load(open('data/idx2vec.txt', 'rb'))
words = sorted(wc, key=wc.get, reverse=True)[:1000]
model = TSNE(n_components=2, perplexity=30, init='pca', method='exact', n_iter=5000)
X = [idx2vec[word2idx[word]] for word in words]
X = model.fit_transform(X)
pyplot.figure(figsize=(50,50))
for i in range(len(X)):
pyplot.text(X[i, 0], X[i, 1], words[i], bbox=dict(facecolor='blue', alpha=0.1))
pyplot.xlim((np.min(X[:, 0]), np.max(X[:, 0])))
pyplot.ylim((np.min(X[:, 1]), np.max(X[:, 1])))
pyplot.savefig('TSNEMSMARCO.png')
print_message("Image Saved")
def train():
W1 = torch.randn(EMBEDDING_DIMENSION, VOCAB_SIZE, dtype=torch.float,device=DEVICE, requires_grad=True)
W2 = torch.randn(VOCAB_SIZE, EMBEDDING_DIMENSION, dtype=torch.float,device=DEVICE, requires_grad=True)
dataloader = DataLoader(MSMARCO('data/pairs.txt'), MB_SIZE, shuffle=True)
epoch = 0
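    # NOTE: each minibatch drawn from the dataloader is treated as one "epoch"
    # below, so training stops after EPOCHS + 1 minibatches.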
for center,context in dataloader:
if epoch > EPOCHS:
break
total_loss = 0
for i in tqdm(range(0,MB_SIZE)):
x = Variable(get_input_layer(center[i])).float().to(DEVICE)
y = Variable(torch.from_numpy(np.array([context[i]])).long()).to(DEVICE)
z1 = torch.matmul(W1, x).to(DEVICE)
z2 = torch.matmul(W2, z1).to(DEVICE)
log_softmax = F.log_softmax(z2, dim=0).to(DEVICE)
loss = F.nll_loss(log_softmax.view(1,-1), y)
total_loss += loss.item()
loss.backward()
W1.data -= learning_rate * W1.grad.data
W2.data -= learning_rate * W2.grad.data
tmp = W1.grad.data.zero_()
tmp = W2.grad.data.zero_()
del x, y, z1,z2, log_softmax,loss, tmp
torch.cuda.empty_cache()
epoch += 1
print_message("Epoch {}: loss {}".format(epoch,total_loss/MB_SIZE))
idx2vec = W2.data.cpu().numpy()
pickle.dump(idx2vec, open('data/idx2vec.txt', 'wb'))
print_message("Word2Vec Finished Training")
if __name__ == '__main__':
word_count = generate_vocabulary()
make_pairs(word_count)
train()
plotGlove()
plotMSMARCO()
annoyGlove()
annoyMSMARCO()
|
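# Higher-order function demo: funcao1 receives another function and forwards
# arbitrary positional and keyword arguments to it.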
def funcao1(funcao, *args, **kwargs):
return funcao(*args, **kwargs)
def funcao2(nome):
return f'Oi {nome}'
def funcao3(nome, saudacao):
return f'{saudacao} {nome}'
executando = funcao1(funcao2, 'Luiz')
print(executando)
executando = funcao1(funcao3, 'Luiz', saudacao='Bom dia')
print(executando)
|
#! /usr/bin/python3
import subprocess
import sys
import time
# Own libraries from immediate directory
from paxlib import *
import read_kbd_direct
LAN_IFACE = "wlan0"
io_terminal = False # use terminal
io_pax = True # use keypad, pax display
try:
import readchar
# Usage:
# char = repr(readchar.readchar())
# key = repr(readchar.readkey())
except ImportError:
print("Module not found: readchar")
print("Run: sudo pip3 install readchar")
sys.exit()
class Wifi:
# Object global variables:
# scroll[]
# networks{}
def __init__(self):
pass
def display(self, myprint="", linesegments=[], delay=0):
if io_terminal and myprint != "":
print(myprint)
if io_pax:
display_lines(linesegments, delay)
def getkey(self):
timeout = 0.1
while True:
# key = self.kbd.read_from()
# if key is not None:
# return key
# Try keypad next
keypad = spin(timeout)
if keypad[0]:
key = keypad[1]
return key
def wpa_cli(self, cmd, n_id=-1, parm=""):
# Need full path name: wpa_cli is not in PATH when executed
# from a @reboot in crontab
wpa_cli_in = '/usr/sbin/wpa_cli -i' + LAN_IFACE + " " + cmd
if n_id >= 0:
wpa_cli_in += " " + str(n_id) + " " + parm
wpa_cli_in = wpa_cli_in.split()
self.pid = subprocess.Popen(wpa_cli_in, stdin=subprocess.PIPE, \
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return self.pid.communicate()[0].decode("utf-8")
def scan_results(self):
names = {}
result = self.wpa_cli("scan_results")
lines = result.splitlines()[1:] # separate lines, discard 1st line
for line in lines:
ll = line.split()
            if len(ll) >= 5 and ll[4][0:2] != "\\x": # skip hidden SSIDs reported as \x00 bytes
names[ll[4]] = (int(ll[2]), ll[3])
return names
def list_networks(self):
self.networks = {}
result = self.wpa_cli("list_networks")
lines = result.splitlines()[1:] # separate lines, discard 1st line
for line in lines:
ll = line.split()
if ll[1] == "any":
ll[1] = ""
self.networks[ll[1]] = int(ll[0])
def get_network_password(self):
self.kbd = read_kbd_direct.KDirect()
if self.kbd.keyboard is None:
e_password = "No kbd found."
linesegments = [e_password, "Pword: no chnge."]
self.display(myprint=e_password, linesegments=linesegments, delay=2)
return ""
e_password = "Enter password: "
linesegments = [e_password, ""]
self.display(myprint=e_password, linesegments=linesegments, delay=0)
line = ""
while True:
# key = readchar.readchar()
key = self.kbd.read_from()
# print(key.encode().hex(), flush=True)
if key in ["\r", "\n"]:
# print()
self.kbd.exit() # cleanup keyboard routines.
return line
if key in ["\x03", "\x1b"]: # ^C, esc
# print()
self.kbd.exit() # cleanup keyboard routines.
return ""
if key == "\x7f":
# print("\r" + " " * len(line), end="", flush=True)
line = line[0:-1]
else:
line += key
# print("\r" + line, end="", flush=True)
linesegments[1] = line
display_lines(linesegments, 0)
def add_network(self, nname="", locked=False):
password = ""
try:
if nname == "":
print("Enter values, ^C to exit.")
nname = input("New network name: ")
if locked:
# password = input("Network password: ")
password = self.get_network_password()
if len(password) == 0:
return False
except KeyboardInterrupt:
print()
return False
id = int(self.wpa_cli("add_network"))
result = self.wpa_cli("set_network", id, parm='ssid "'+nname+'"')
if len(password) == 0:
result = self.wpa_cli("set_network", id, parm='key_mgmt NONE')
else:
result = self.wpa_cli("set_network", id, parm='psk "'+password+'"')
result = self.wpa_cli("set_network", id, parm='key_mgmt WPA-PSK')
result = self.wpa_cli("enable_network", id)
result = self.wpa_cli("save_config")
return True
def remove_network(self, ssid=""):
if ssid == "":
try:
print("Enter value, ^C to exit.")
id = input("Network # to remove: ")
id = int(id)
except KeyboardInterrupt:
print()
return
except ValueError:
print("(ignored)")
return
else:
id = self.networks[ssid]
result = self.wpa_cli("remove_network", id)
discard = self.wpa_cli("save_config")
return result
def select_network(self, ssid=""):
if ssid == "":
try:
print("Enter value, ^C to exit.")
id = input("Enter network to use: ")
id = int(id)
except KeyboardInterrupt:
print()
return
except ValueError:
print("(ignored)")
return
else:
try:
id = self.networks[ssid]
            except KeyError: # bad id - typically: No wifi
return # we then check for status
result = self.wpa_cli("select_network", id)
time.sleep(1)
return result
def reconfigure(self):
result = self.wpa_cli("reconfigure")
return result
def status(self):
status = {}
result = self.wpa_cli("status",)
for line in result.splitlines():
ll = line.split("=")
if ll[0] in ("id", "ssid", "key_mgmt", "ip_address", "wpa_state"):
status[ll[0]] = ll[1]
return status
def get_IP2(self):
        '''Duplicate of a function in paxconfig - consolidate sometime.
'''
status = wifi.status()
if "ssid" in status:
linesegments = [cap("AP:" + status["ssid"], 16)]
linesegments.append(status["ip_address"])
else:
linesegments = ["Not connected", "to wifi."]
self.display(myprint="", linesegments=linesegments, delay=3)
def is_locked(self, key_mgmt):
key_mgmt = key_mgmt.replace("[",'') # discard "["
key_mgmt = key_mgmt.split("]") # split on "]"
for key in key_mgmt:
if key[0:3] == "WPA":
return True
return False
    def sig_strength(self, db):
        # Map RSSI (dBm) to a 1-4 signal-strength bucket.
        if db >= -20:
            return 4
        if db >= -70:
            return 3
        if db >= -80:
            return 2
        return 1
def build_term_line(self, d):
locked = "L" if d["locked"] else " "
defined = "F" if d["defined"] else " "
current = "*" if d["current_ap"] else " "
return current + locked + d["strength"] + defined + " " + d["ssid"]
def build_pax_line(self, d):
locked = "Lkd " if d["locked"] else "Unl "
defined = "Dfd " if d["defined"] else "Und "
current = "AP " if d["current_ap"] else ""
return [cap(d["ssid"], 16), "S"+d["strength"] + " " + locked + defined + current]
def build_scroll(self):
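        # Merge wpa_cli status, configured networks, and scan results into one
        # list of per-SSID dicts consumed by the config UI scroller.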
status = self.status()
self.list_networks()
ssids = self.scan_results()
self.scroll = []
for ssid in ssids:
try:
current_ap = status["ssid"] == ssid and \
status["ip_address"][0:7] != "169.254" # T/F value
except KeyError: # not defined - hence it's the "no wifi" case
current_ap = False
locked = self.is_locked(ssids[ssid][1])
strength = str(self.sig_strength(ssids[ssid][0]))
defined = ssid in self.networks # T/F value
ssid_info ={"current_ap":current_ap, "locked":locked, "strength":strength,
"defined":defined, "ssid":ssid}
self.scroll.append(ssid_info)
if len(self.scroll) == 0: # Add a dummy entry
self.scroll = [{"current_ap":False, "locked":False,
"strength":"0", "defined":False, "ssid":'No wifi'}]
def wifi_help(self):
term_help = "(h)elp, (n)ext, (b)efore, (s)elect, (r)emove, " + \
"(m)anage netwk, (q)uit:"
pax_help =[ "2=Back AP",
"5=Next AP",
"3=Select AP",
"6=Manage AP",
"9=Remove defn",
"*=Exit Config" ]
return term_help, pax_help
def _change_ap(self, ssid):
inactive = {"INACTIVE", "INTERFACE_DISABLED", "DISCONNECTED"}
self.select_network(ssid)
while True:
status = self.status()
print(status)
if status["wpa_state"] in inactive or \
("ip_address" in status and status["ip_address"][0:7] == "169.254"):
                # Bad password, address, or something
self.display(myprint="Select failed", \
linesegments=["Select failed"], delay=2)
return
if "ssid" in status and "ip_address" in status:
linesegments = [cap("AP:" + status["ssid"], 16)]
linesegments.append(status["ip_address"])
self.display(myprint="", linesegments=linesegments, delay=3)
return
# ... expect status["wpa_state"] == "COMPLETED" or "ASSOCIATING":
self.display(myprint="", \
linesegments=["Select pending"], delay=0)
time.sleep(1)
def wifi_config(self):
s_line = 0
linesegments=["wifi config>"]
linesegments.append("0=Help Config")
self.display(myprint="(h)elp, (n)ext, (b)efore, (s)elect, (r)emove, " + \
"(m)anage netwk, (q)uit:", \
linesegments=linesegments, delay=2)
self.build_scroll()
while True:
self.display(myprint=self.build_term_line(self.scroll[s_line]), \
linesegments=self.build_pax_line(self.scroll[s_line]), \
delay=0)
key = self.getkey()
# Help
if key in ("0", "h"):
myprint, linesegments = self.wifi_help()
self.display(myprint=myprint, \
linesegments=linesegments, \
delay=3)
# Next
elif key in ("n", "2", "\r", " "):
s_line += 1
if s_line >= len(self.scroll):
self.build_scroll() # rebuild scroll as we rollover
s_line = 0 # to bring in updates
# Back
elif key in ("b", "5"):
s_line = s_line - 1 if s_line > 0 else len(self.scroll) - 1
# Select network
elif key in ("s", "3"):
ssid = self.scroll[s_line]["ssid"]
if not self.scroll[s_line]["defined"]:
locked = self.scroll[s_line]["locked"]
if self.add_network(ssid, locked):
self.list_networks() # update networks dict
self.scroll[s_line]["defined"] = True
else:
continue
self._change_ap(ssid)
self.build_scroll()
s_line = min(s_line, len(self.scroll)-1) # in case we see
# fewer wifi units out there
# after the build_scroll.
# Remove network from wpa_supplicant.conf
elif key in ("r", "9"):
if self.scroll[s_line]["current_ap"]:
self.display(myprint="Can't remove active network", \
linesegments=["Can't remove", "active network"], delay=2)
continue
ssid = self.scroll[s_line]["ssid"]
if ssid in self.networks:
self.remove_network(self.scroll[s_line]["ssid"])
self.scroll[s_line]["defined"] = False
self.list_networks()
else:
self.display(myprint="ssid not in definitions", \
linesegments=["ssid not in", "definitions"], delay=2)
# Manage networks
elif key in ("m", "6"):
ssid = self.scroll[s_line]["ssid"]
if self.scroll[s_line]["defined"]:
password = ""
if self.scroll[s_line]["locked"]:
try:
# password = input("Network password: ")
password = self.get_network_password()
if len(password) == 0:
continue
except KeyboardInterrupt:
continue
id = self.networks[ssid]
result = self.wpa_cli("set_network", id, parm='psk "'+password+'"')
if len(password) == 0:
result = self.wpa_cli\
("set_network", id, parm='key_mgmt NONE')
else:
result = self.wpa_cli("set_network", id, parm='key_mgmt WPA-PSK')
result = self.wpa_cli("save_config")
elif self.add_network(ssid, \
locked=self.scroll[s_line]["locked"]):
# captures password in add_network.
self.list_networks() # update networks dict
self.scroll[s_line]["defined"] = True
else:
pass # not defined and failed add_network (^C) >> ignore
# Quit, Exit
elif key in ("q", "x", "*"):
break
# Unrecognized/Repeat
else:
self.display(myprint="", linesegments=[], delay=0)
if (__name__ == "__main__"):
wifi = Wifi()
try:
while True:
# keypad.wait_keypress()
# print(keypad.key, keypad.cseconds, keypad.keytype)
cli = input('scan, list_n, add, remove, select, status, ' +
'reconfigure, all, w/config, quit: ').strip()
if len(cli) == 0:
continue
elif cli == "scan"[0:len(cli)]:
ssids = wifi.scan_results()
print(ssids)
elif cli == "list_n"[0:len(cli)]:
wifi.list_networks()
print(wifi.networks)
elif cli == "add"[0:len(cli)]:
result = wifi.add_network()
print(result)
wifi.list_networks()
print(wifi.networks)
elif cli == "remove"[0:len(cli)]:
result = wifi.remove_network()
print(result)
wifi.list_networks()
print(wifi.networks)
elif cli == "select"[0:len(cli)]:
result = wifi.select_network()
print(result)
status = wifi.status()
print(status)
elif cli == "status"[0:len(cli)]:
status = wifi.status()
print(status)
elif cli == "reconfigure"[0:len(cli)]:
result = wifi.reconfigure()
print(result)
wifi.list_networks()
print(wifi.networks)
elif cli == "all"[0:len(cli)]:
wifi.build_scroll()
for line in wifi.scroll:
                    print(wifi.build_term_line(line))
elif cli == "w/config"[0:len(cli)]:
wifi.wifi_config()
elif cli == "quit"[0:len(cli)]:
break
except KeyboardInterrupt:
print()
|
import tinput
def test_main():
assert tinput # use your library here
|
from autoconf import conf
import autofit as af
from autogalaxy.galaxy import galaxy as g
from autofit.tools.phase import Dataset
from autogalaxy.pipeline.phase import abstract
import numpy as np
class HyperPhase:
def __init__(
self,
phase: abstract.AbstractPhase,
hyper_search,
model_classes=tuple(),
hyper_image_sky=None,
hyper_background_noise=None,
hyper_galaxy_names=None,
):
"""
Abstract HyperPhase. Wraps a phase, performing that phase before performing the action
specified by the run_hyper.
Parameters
----------
phase
A phase
"""
self.phase = phase
self.hyper_search = hyper_search
self.model_classes = model_classes
self.hyper_image_sky = hyper_image_sky
self.hyper_background_noise = hyper_background_noise
self.hyper_galaxy_names = hyper_galaxy_names
@property
def hyper_name(self):
return "hyper"
def make_model(self, instance):
model = instance.as_model(self.model_classes)
model.hyper_image_sky = self.hyper_image_sky
model.hyper_background_noise = self.hyper_background_noise
return model
def add_hyper_galaxies_to_model(
self, model, path_galaxy_tuples, hyper_galaxy_image_path_dict
):
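        # Attach a HyperGalaxy prior model to each named hyper galaxy whose
        # hyper-galaxy image is not identically zero.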
for path_galaxy, galaxy in path_galaxy_tuples:
if path_galaxy[-1] in self.hyper_galaxy_names:
if not np.all(hyper_galaxy_image_path_dict[path_galaxy] == 0):
if "source" in path_galaxy[-1]:
setattr(
model.galaxies.source,
"hyper_galaxy",
af.PriorModel(g.HyperGalaxy),
)
elif "lens" in path_galaxy[-1]:
setattr(
model.galaxies.lens,
"hyper_galaxy",
af.PriorModel(g.HyperGalaxy),
)
return model
def make_hyper_phase(self) -> abstract.AbstractPhase:
"""
Returns
-------
hyper_phase
A copy of the original phase with a modified name and path
"""
self.phase.search = self.hyper_search.copy_with_name_extension(
extension=self.phase.name, path_prefix=self.phase.paths.path_prefix
)
self.phase.hyper_name = self.hyper_name
return self.phase
def run(
self,
dataset: Dataset,
results: af.ResultsCollection,
info=None,
pickle_files=None,
**kwargs,
) -> af.Result:
"""
Run the hyper phase and then the hyper_galaxies phase.
Parameters
----------
dataset
Data
results
Results from previous phases.
kwargs
Returns
-------
result
The result of the phase, with a hyper_galaxies result attached as an attribute with the hyper_name of this
phase.
"""
result = self.phase.run(
dataset, results=results, info=info, pickle_files=pickle_files, **kwargs
)
results.add(self.phase.paths.name, result)
hyper_result = self.run_hyper(
dataset=dataset,
results=results,
info=info,
pickle_files=pickle_files,
**kwargs,
)
setattr(result, self.hyper_name, hyper_result)
return result
def run_hyper(
self,
dataset,
results: af.ResultsCollection,
info=None,
pickle_files=None,
**kwargs,
):
"""
Run the phase, overriding the search's model instance with one created to
only fit pixelization hyperparameters.
"""
self.results = results
phase = self.make_hyper_phase()
model = self.make_model(instance=results.last.instance)
if self.hyper_galaxy_names is not None:
model = self.add_hyper_galaxies_to_model(
model=model,
path_galaxy_tuples=results.last.path_galaxy_tuples,
hyper_galaxy_image_path_dict=results.last.hyper_galaxy_image_path_dict,
)
phase.model = model
return phase.run(
dataset,
mask=results.last.mask,
results=results,
info=info,
pickle_files=pickle_files,
)
def __getattr__(self, item):
return getattr(self.phase, item)
|
import random
import copy
from Card import *
class Deck:
"""
Cards' Number in the initial deck
111 22 33 44 5 for each color
"""
def __init__(self, initial: list=None, _seed:int = None):
colors: tuple = (0, 1, 2, 3, 4)
numbers: tuple = (1, 1, 1, 2, 2, 3, 3, 4, 4, 5)
starter: list = [
Card(cardId=id)
for id in range(50)
]
random.seed(_seed)
if initial:
self._cards = copy.deepcopy(initial)
self._drew = []
# build removed cards list
for cardInitial in initial:
for cardStarter in starter:
if cardInitial.toShort() == cardStarter.toShort():
self._drew.append(cardInitial)
# self.remove(cardInitial)
else:
self._cards = starter
self._drew = []
    def reprCards(self):
        return " ".join(card.toShort() for card in self._cards)
def draw(self) -> Card:
if self.remainCardNum() == 0:
raise ValueError("no card remain in deck")
else:
index: int = random.randint(0, self.remainCardNum()-1)
drew = self._cards.pop(index)
self._drew.append(drew)
return drew
def remove(self, card: Card) -> Card:
for num, cardInDeck in enumerate(self._cards):
if cardInDeck.toShort() == card.toShort():
return self._cards.pop(num)
raise ValueError("the card not found tried remove")
    def remainCardNum(self) -> int:
        return len(self._cards)
    def remainCardList(self) -> list:
        return list(self._cards)
    def drewCardList(self) -> list:
        return list(self._drew)
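# Usage sketch (assumes Card.toShort() from Card.py):
#   deck = Deck(_seed=42)
#   card = deck.draw()
#   print(deck.remainCardNum())  # 49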
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Created: 07-2020 - Carmelo Mordini <carmelo> <carmelo.mordini@unitn.it>
"""Module docstring
"""
import numpy as np
from scipy.special import binom
def smooth_gradient_n(y, N, dx=1, pad_mode='edge', axis=-1):
# http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/
assert N % 2 == 1 # N must be odd
m = (N - 3) // 2
M = (N - 1) // 2
cks = (2**(-(2 * m + 1)) * (binom(2 * m, m - k + 1)
- binom(2 * m, m - k - 1))
for k in range(1, M + 1))
pw = [(0, 0)] * y.ndim
pw[axis] = (N, N)
y = np.pad(y, pad_width=pw, mode=pad_mode)
if np.ndim(dx) != 0:
# FIXME this actually assumes that x has the same dimensions as y
x = np.pad(dx, pad_width=pw, mode=pad_mode)
even = False
else:
even = True
d = np.zeros_like(y)
for j, ck in enumerate(cks):
k = j + 1
dx = dx if even else (np.roll(x, -j - 1, axis) -
np.roll(x, j + 1, axis)) / 2 / k
d += ck * (np.roll(y, -j - 1, axis) - np.roll(y, j + 1, axis)) / dx
S = [slice(None)] * y.ndim
S[axis] = slice(N, -N)
y_grad = d[tuple(S)]
return y_grad
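# Usage sketch (illustrative): differentiate a noisy sine on a uniform grid.
#   x = np.linspace(0, 2 * np.pi, 500)
#   y = np.sin(x) + 0.01 * np.random.randn(x.size)
#   dy = smooth_gradient_n(y, N=7, dx=x[1] - x[0])   # approximates cos(x)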
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file euler/grabfaces
## @brief Python application to grab a set of points specified in a UCD
## face description file along with the associated normals and write them to a
## file.
from pyre.applications.Script import Script as Application
class GrabFaces(Application):
"""
Python application to grab a set of point coordinates and normals from a UCD
face description file.
"""
class Inventory(Application.Inventory):
"""
Python object for managing GrabFaces facilities and properties.
"""
## @class Inventory
## Python object for managing GrabFaces facilities and properties.
##
## \b Properties
## @li \b ucd_face_file Filename of input UCD face description file.
## @li \b fault_id_num ID number (material number) of fault to use.
## @li \b point_output_file Filename of output set of points and normals.
## @li \b node_values_list List specifying position of desired attributes in UCD face nodal attributes.
## @li \b exclude_zero_normals Flag indicating whether to exclude points if the associated normal has zero magnitude.
##
## \b Facilities
## @li None
import pyre.inventory
ucdFaceFile = pyre.inventory.str("ucd_face_file", default="test_face.inp")
ucdFaceFile.meta['tip'] = "Filename of ucd file containing face descriptions."
faultIDNum = pyre.inventory.int("fault_id_num", default=1)
faultIDNum.meta['tip'] = "ID number (material number) of fault to use."
pointOutputFile = pyre.inventory.str("point_output_file",
default="points.coordnorm")
pointOutputFile.meta['tip'] = "Filename of output coordinates and normals."
nodeValuesList = pyre.inventory.list("node_values_list", default=[1, 2, 3])
nodeValuesList.meta['tip'] = "Position of desired values in UCD face nodal attributes."
excludeZeroNormals = pyre.inventory.bool("exclude_zero_normals",
default=False)
excludeZeroNormals.meta['tip'] = "Whether to exclude points with zero normals."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="grabfaces"):
Application.__init__(self, name)
return
def main(self):
# import pdb
# pdb.set_trace()
self._grabFaces()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Setup members using inventory.
"""
Application._configure(self)
self.ucdFaceFile = self.inventory.ucdFaceFile
self.faultIDNum = self.inventory.faultIDNum
self.pointOutputFile = self.inventory.pointOutputFile
self.nodeValuesList = self.inventory.nodeValuesList
self.excludeZeroNormals = self.inventory.excludeZeroNormals
return
def _grabFaces(self):
"""
Reads vertex coordinates, connectivities, and vertex attributes from a
UCD file.
"""
        f = open(self.ucdFaceFile)
lines = f.readlines()
fileLen = len(lines)
firstline = lines[0].split()
numVerts = int(firstline[0])
numCells = int(firstline[1])
numVertAttrs = int(firstline[2])
vertCoords = []
# Get vertex coordinates
for lineCount in range(1, numVerts+1):
data = lines[lineCount].split()
for dim in range(1,4):
vertCoords.append(float(data[dim]))
# Get cell connectivities
faultVerts = []
lineBegin = 1 + numVerts
lineEnd = lineBegin + numCells
firstCellLine = lines[lineBegin].split()
cellType = str(firstCellLine[2])
if cellType == "tri":
numVertsPerCell = 3
else:
numVertsPerCell = 4
for lineCount in range(lineBegin, lineEnd):
data = lines[lineCount].split()
cellMat = int(data[1])
if cellMat == self.faultIDNum:
                for vert in range(3, 3 + numVertsPerCell):
vertNum = int(data[vert])
if vertNum not in faultVerts:
faultVerts.append(vertNum)
faultVerts.sort()
numFaultVerts = len(faultVerts)
# read normals/values and write out the selected values.
o = open(self.pointOutputFile, 'w')
lineBegin = 2 + numVerts + numCells + numVertAttrs
lineEnd = lineBegin + numVerts
        vertInd = 0
        ucdInd = 1
        normals = [0.0, 0.0, 0.0]
        v0 = int(self.nodeValuesList[0])
        v1 = int(self.nodeValuesList[1])
        v2 = int(self.nodeValuesList[2])
        for lineCount in range(lineBegin, lineEnd):
            vertex = faultVerts[vertInd]
            if vertex == ucdInd:
                data = lines[lineCount].split()
                normals = [float(data[v0]), float(data[v1]), float(data[v2])]
                outputPoint = not self.excludeZeroNormals
                outputPoint = outputPoint or \
                              normals[0] != 0.0 or \
                              normals[1] != 0.0 or \
                              normals[2] != 0.0
                if outputPoint:
                    # Vertex numbers are 1-based; coordinates are stored flat,
                    # three entries per vertex.
                    coordBase = 3 * (vertex - 1)
                    for dim in range(3):
                        o.write(' %.12e' % vertCoords[coordBase + dim])
                    for dim in range(3):
                        o.write(' %.12e' % normals[dim])
                    o.write('\n')
                vertInd += 1
                if vertInd == numFaultVerts: break
            ucdInd += 1
f.close()
o.close()
return
# ----------------------------------------------------------------------
if __name__ == '__main__':
app = GrabFaces()
app.run()
# End of file
|
import os
import sys
from GenomicsQueries import Queries
from BigQueryClient import BigQuery
from config import Config
from GoogleGenomicsClient import GoogleGenomicsClient
import logging
import re
import time
import json
from pyflow import WorkflowRunner
class GenomicsQC(object):
def __init__(self, verbose=False, client_secrets=None, project_number=None, dataset=None, variant_table=None,
expanded_table=None):
# Set global variables
self.query_repo = Config.QUERY_REPO
if variant_table is None:
variant_table = Config.VARIANT_TABLE
self.variant_table = variant_table
if expanded_table is None:
expanded_table = Config.EXPANDED_TABLE
self.expanded_table = expanded_table
if client_secrets is None:
client_secrets = Config.CLIENT_SECRETS
self.client_secrets_path = client_secrets
if project_number is None:
project_number = Config.PROJECT_NUMBER
self.project_number = project_number
if dataset is None:
dataset = Config.DATASET
self.dataset = dataset
qc_dataset = Config.QC_DATASET
project_name = Config.PROJECT_NAME
# Set up logging
self.date = time.strftime("%Y%m%d-%H%M%S")
self.setup_log(verbose)
# Set up API clients
self.bq = BigQuery(project_number=self.project_number,
client_secrets=self.client_secrets_path,
project_name=project_name,
qc_dataset=qc_dataset)
self.gg = GoogleGenomicsClient(client_secrets=self.client_secrets_path,
project_number=self.project_number,
dataset=self.dataset)
self.queries = QCSteps(verbose=verbose, client_secrets=self.client_secrets_path,
project_number=self.project_number, dataset=self.dataset,
variant_table=self.variant_table, expanded_table=self.expanded_table)
#### Specific types of QC functions ####
# Sample level QC
def sample_qc(self, remove=False):
logging.info("Running Sample Level QC")
failed_samples = self.queries.sample_level_qc()
if remove is True:
self.remove_failed_samples(failed_samples)
self.print_removed(failed_samples, 'samples')
# Variant level QC
def variant_qc(self, poll=False):
logging.info("Running Variant Level QC")
job_ids = self.queries.variant_level_qc()
if poll is True:
logging.debug("Waiting for query completion")
self.poll_jobs(job_ids)
# Execute a custom list of queries
def custom_list(self, qc_list):
for s in qc_list:
try:
method = getattr(self.queries, s)
result = method()
print json.dumps(result, sort_keys=True, indent=2)
            except Exception as e:
                print "Failed to run %s (not a valid qc method?): %s" % (s, e)
#### Functions for removing based on provided files ####
def remove_samples_from_file(self, file):
samples = self.sample_file_to_dict(file)
self.remove_failed_samples(samples)
def remove_positions_from_file(self, file):
positions = self.position_file_to_dict(file)
self.remove_failed_positions(positions)
# Accumulate samples from file for removal
def sample_file_to_dict(self, file):
samples = {}
        for line in open(file):
samples[line.rstrip('\n')] = ''
return samples
# Accumulate positions from file for removal
def position_file_to_dict(self, file):
positions = {}
        for line in open(file):
p = re.sub(r'\t', "/", line.rstrip('\n'))
positions[p] = ''
return positions
#### Functions to remove each type of failure ####
# Remove samples from variant set given a dictionary of samples. sample_id is dictionary key
def remove_failed_samples(self, failed):
logging.debug("Removing failed samples.")
for s in failed:
self.remove_sample(s)
self.print_removed(failed, "samples")
# Remove positions from variant set given a dictionary of positions. position is dictionary key
def remove_failed_positions(self, failed):
logging.debug("Removing failed positions.")
for p in failed:
reference_name, start, end = p.split("/")
self.remove_variant(reference_name, start, end)
self.print_removed(failed, "positions")
# Remove sample calls from variant set given a dictionary of sample positions. sample-call is dictionary key
def remove_failed_sample_calls(self, failed):
logging.debug("Removing failed calls.")
for p in failed:
sample_id, reference_name, start, end = p.split("/")
self.remove_sample_call(sample_id, reference_name, start, end)
self.print_removed(failed, "calls")
#### Google Genomics functions ####
# Remove a single sample from a variant set
def remove_sample(self, sample_id):
logging.debug("Removing sample: %s" % sample_id)
call_set_id = self.gg.get_call_set_id(sample_id)
if call_set_id is None:
logging.error("Failed to retrieve call set id for %s." % sample_id)
return None
logging.debug("callset id: %s" % call_set_id)
self.gg.delete_call_set(call_set_id)
# Remove an entire position from a variant set
def remove_variant(self, reference_name, start, end):
logging.debug("Removing position: %s %s %s" % (reference_name, start, end))
variant_id = self.gg.get_variant_id(reference_name=reference_name, start=start, end=end)
        if variant_id is None:
            logging.error("Failed to retrieve variant id for %s %s %s" % (reference_name, start, end))
            return None
        self.gg.delete_variant(variant_id)
# Remove a variant of a specific call from a variant set
def remove_sample_call(self, sample_id, reference_name, start, end):
logging.debug("Removing call: %s %s %s %s" % (sample_id, reference_name, start, end))
# todo figure this out
return None
#### Printing ####
    def print_removed(self, failed, type):
        file = "failed_%s.%s.tsv" % (type, self.date)
        with open(file, 'a') as f:
            for r in failed:
                f.write("%s\t%s\n" % (r, ",".join(failed[r])))
#### Miscellaneous functions ####
# Check if a path exists
def check_path(self, file):
if not os.path.exists(file):
raise Exception("%s not found!" % file)
    def setup_log(self, verbose):
        if verbose is True:
            log_level = logging.DEBUG
        else:
            log_level = logging.INFO
        logging.basicConfig(filename='genomics-qc.%s.log' % self.date,
                            format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', level=log_level)
# Check for job status given a set of BigQuery ids
def poll_jobs(self, ids):
print "Waiting for all queries to complete"
for query in ids:
id = ids[query]
logging.debug("Entering polling for %s" % query)
result = self.bq.poll_job(id)
logging.info("Query complete: %s %s" % (query, result))
if result != 'DONE':
print "Query failed: %s %s" % (query, result)
logging.debug("Polling complete")
'''
QCSteps Class
Self-contained functions for executing QC queries on BigQuery. Queries can be run individually, or all
sample level or variant level QC steps can be executed together.
'''
class QCSteps(object):
def __init__(self, verbose=False, client_secrets=None, project_number=None, dataset=None, variant_table=None,
expanded_table=None):
# Set global variables
self.query_repo = Config.QUERY_REPO
if variant_table is None:
variant_table = Config.VARIANT_TABLE
self.variant_table = variant_table
if expanded_table is None:
expanded_table = Config.EXPANDED_TABLE
self.expanded_table = expanded_table
if client_secrets is None:
client_secrets = Config.CLIENT_SECRETS
self.client_secrets_path = client_secrets
if project_number is None:
project_number = Config.PROJECT_NUMBER
self.project_number = project_number
if dataset is None:
dataset = Config.DATASET
self.dataset = dataset
qc_dataset = Config.QC_DATASET
project_name = Config.PROJECT_NAME
# Set up API clients
self.bq = BigQuery(project_number=self.project_number,
client_secrets=self.client_secrets_path,
project_name=project_name,
qc_dataset=qc_dataset)
self.gg = GoogleGenomicsClient(client_secrets=self.client_secrets_path,
project_number=self.project_number,
dataset=self.dataset)
self.failed_samples = {}
    '''
    Execute all sample level qc steps
    Returns a dictionary mapping the id of each failing sample to the list of QC steps it failed.
    '''
def sample_level_qc(self):
#self.__collect_failed_samples(self.gender_check(), "gender_check")
#self.__collect_failed_samples(self.genotyping_concordance(), "genotype_concordance")
self.__collect_failed_samples(self.heterozygosity_rate(), "heterozygosity_rate")
self.__collect_failed_samples(self.inbreeding_coefficient(), "inbreeding_coefficient")
self.__collect_failed_samples(self.missingness_rate(), "missingness_rate")
self.__collect_failed_samples(self.singletons(), "private_variants")
return self.failed_samples
    '''
    Execute all variant level qc steps
    As some of these queries can return very large results, they are output to a table by default.
    A dictionary of job ids is returned that can be used to check the query status.
    '''
def variant_level_qc(self, save_to_table=True):
ids = {}
ids["blacklisted"] = self.blacklisted(save_to_table)
ids["hardy_weinberg"] = self.hardy_weinberg(save_to_table)
ids["heterozygous_haplotype"] = self.heterozygous_haplotype(save_to_table)
ids["variant_missingness"] = self.missingness_variant_level(save_to_table)
#ids["titv_by_alts"] = self.titv_by_alternate_allele_counts(save_to_table)
#ids["titv_by_depth"] = self.titv_by_depth(save_to_table)
#ids["titv_by_genomic_window"] = self.titv_by_genomic_window(save_to_table)
return ids
'''
SAMPLE LEVEL QC Queries
'''
'''
Gender Check
    Gender is inferred for each genome by calculating the heterozygosity rate on the X chromosome. Genomes whose
    inferred sex differs from the reported sex are removed from the cohort. Although it is possible for
    people to be genotypically male and phenotypically female, it is more likely that samples and phenotypic records
    were mislabeled.
'''
def gender_check(self):
print "Running Gender Check"
query_file = Queries.GENDER_CHECK
query = self.__prepare_query(query_file)
return self.bq.run(query, self.__query_name(query_file), False)
'''
Genotyping Concordance
    We next want to look at the concordance between SNPs called from the sequencing data and those called through
    genotyping. This allows us to identify samples that may have been mixed up in the laboratory. Any genomes
    with a concordance less than 0.95 are returned.
'''
def genotyping_concordance(self):
print "Running Genotype Concordance"
query_file = Queries.GENOTYPING_CONCORDANCE
query = self.__prepare_query(query_file)
return self.bq.run(query, self.__query_name(query_file), False)
'''
Heterozygosity Rate
Heterozygosity rate is defined as the number of heterozygous calls in a genome. Genomes with a heterozygosity rate
more than 3 standard deviations away from the mean are returned.
'''
def heterozygosity_rate(self):
print "Running Heterozygosity Rate"
prequery_file = Queries.HETEROZYGOSITY_METRICS
cutoffs = self.__three_sigma_cutoffs(prequery_file)
query_file = Queries.HETEROZYGOSITY
query = self.__prepare_query(query_file, cutoffs)
return self.bq.run(query, self.__query_name(query_file), False)
'''
Inbreeding Coefficient
The inbreeding coefficient (F) is a measure of expected homozygosity rates vs observed homozygosity rates for
individual genomes. Here, we calculate the inbreeding coefficient using the method-of-moments estimator. Genomes
with an inbreeding coefficient more than 3 standard deviations away from the mean are removed from the cohort.
'''
def inbreeding_coefficient(self):
print "Running Inbreeding Coefficient"
prequery_file = Queries.INBREEDING_COEFFICIENT_METRICS
cutoffs = self.__three_sigma_cutoffs(prequery_file)
query_file = Queries.INBREEDING_COEFFICIENT
query = self.__prepare_query(query_file, cutoffs)
return self.bq.run(query, self.__query_name(query_file), False)
'''
Missingness Rate
    Missingness is defined as the proportion of sites found in the reference genome that are not called in a given
genome. We calculate the missingness rate of each genome in our cohort in order to identify samples that are
potentially low quality. If a sample has a high missingness rate it may be indicative of issues with sample
preparation or sequencing. Genomes with a missingness rate greater than 0.1 are returned.
'''
def missingness_rate(self):
print "Running Sample Level Missingness Rate"
query_file = Queries.MISSINGNESS_SAMPLE_LEVEL
query = self.__prepare_query(query_file)
return self.bq.run(query, self.__query_name(query_file), False)
'''
Singleton Rate
Singleton rate is defined as the number of variants that are unique to a genome. If a variant is found in only one
genome in the cohort it is considered a singleton. Genomes with singleton rates more than 3 standard deviations away
from the mean are returned.
'''
def singletons(self):
print "Running Private Variants"
prequery_file = Queries.PRIVATE_VARIANT_METRICS
cutoffs = self.__three_sigma_cutoffs(prequery_file)
query_file = Queries.PRIVATE_VARIANTS
query = self.__prepare_query(query_file, cutoffs)
return self.bq.run(query, self.__query_name(query_file), False)
'''
VARIANT LEVEL QC
By default all output is directed to a BigQuery table as the results may be very large.
'''
'''
Blacklisted Variants
Identify all variants within our cohort that have been blacklisted. For more information on what variants are
blacklisted and why see here: https://sites.google.com/site/anshulkundaje/projects/blacklists
By default all output is directed to a BigQuery table as the results may be very large. Output is directed to
qc_tables.blacklisted_variants
'''
def blacklisted(self, save_to_table=False):
print "Running Blacklisted"
query_file = Queries.BLACKLISTED
query = self.__prepare_query(query_file)
return self.bq.run(query, self.__query_name(query_file), save_to_table)
'''
Hardy-Weinberg Equilibrium
For each variant, compute the expected versus observed relationship between allele frequencies and genotype
frequencies per the Hardy-Weinberg Equilibrium.
By default all output is directed to a BigQuery table as the results may be very large. Output is directed to
qc_tables.hwe_fail
'''
def hardy_weinberg(self, save_to_table=False):
print "Running Hardy-Weinberg Equilibrium"
prequery_file = Queries.HARDY_WEINBERG_METRICS
cutoffs = self.__cutoff(prequery_file)
query_file = Queries.HARDY_WEINBERG
query = self.__prepare_query(query_file, cutoffs)
print query
return self.bq.run(query, self.__query_name(query_file), save_to_table)
'''
Heterozygous Haplotype
    For each variant within the X and Y chromosomes, identify heterozygous calls in male genomes. All heterozygous
    calls outside the pseudoautosomal regions in male genomes are returned.
By default all output is directed to a BigQuery table as the results may be very large. Output is directed to
qc_tables.sex_chromosome_heterozygous_haplotypes
'''
def heterozygous_haplotype(self, save_to_table=False):
print "Running Heterozygous Haplotype"
query_file = Queries.HETERZYGOUS_HAPLOTYPE
query = self.__prepare_query(query_file)
return self.bq.run(query, self.__query_name(query_file), save_to_table)
'''
Variant Level Missingness
For each variant, compute the missingness rate. This query can be used to identify variants with a poor call rate.
By default all output is directed to a BigQuery table as the results may be very large. Output is directed to
qc_tables.variant_level_missingness_fail
'''
def missingness_variant_level(self, save_to_table=False):
print "Running Variant Level Missingness"
query_file = Queries.MISSINGNESS_VARIANT_LEVEL
query = self.__prepare_query(query_file)
return self.bq.run(query, self.__query_name(query_file), save_to_table)
'''
Ti/Tv By Alternate Allele Counts
    Check whether the ratio of transitions vs. transversions in SNPs appears to be reasonable across the range of rare
    variants to common variants. This query may help to identify problems with rare or common variants.
By default all output is directed to a BigQuery table as the results may be very large. Output is directed to
qc_tables.titv_alternate_alleles
'''
def titv_by_alternate_allele_counts(self, save_to_table=False):
print "Running Ti/Tv By Alternate Allele Counts"
#todo
return
'''
Ti/Tv By Depth
Check whether the ratio of transitions vs. transversions in SNPs is within a set range at each sequencing depth for
each sample.
By default all output is directed to a BigQuery table as the results may be very large. Output is directed to
qc_tables.titv_by_depth_fail
'''
def titv_by_depth(self, save_to_table=False):
print "Running Ti/Tv By Depth"
query_file = Queries.TITV_DEPTH
query = self.__prepare_query(query_file)
return self.bq.run(query, self.__query_name(query_file), save_to_table)
'''
Ti/Tv By Genomic Window
Check whether the ratio of transitions vs. transversions in SNPs appears to be reasonable in each window of genomic
positions. This query may help identify problematic regions.
By default all output is directed to a BigQuery table as the results may be very large. Output is directed to
qc_tables.titv_by_genomic_window_fail
'''
def titv_by_genomic_window(self, save_to_table=False):
print "Running Ti/Tv By Genomic Window"
query_file = Queries.TITV_GENOMIC_WINDOW
query = self.__prepare_query(query_file)
return self.bq.run(query, self.__query_name(query_file), save_to_table)
'''
Private functions used to prepare queries for execution
'''
# Set up the query, read it in, apply substitutions
    def __prepare_query(self, query_file, cutoffs=None):
        if cutoffs is None:
            cutoffs = {}
        logging.debug("Preparing query: %s" % query_file)
        raw_query = self.__get_query(query_file)
        all_subs = {}
presets = self.__get_preset_cutoffs(query_file)
all_subs.update(presets)
all_subs.update(cutoffs)
#print Queries.MAIN_QUERY
if query_file in Queries.MAIN_QUERY:
main_query = Queries.MAIN_QUERY[query_file]
#print main_query
main = self.__prepare_query(main_query)
#print main
all_subs['_MAIN_QUERY_'] = main
query = self.__query_substitutions(raw_query, other=all_subs)
#print all_subs
return query
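    # Worked example (hypothetical placeholder values): if a query file contains
    # "SELECT ... FROM (_MAIN_QUERY_)" and the main query itself references
    # "_THE_TABLE_", the first pass splices in the main query text and the
    # recursive substitution below then resolves "_THE_TABLE_" inside it.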
# Recursively substitute placeholders in each query. Recursion is required because in some cases items that
# substituted also contain placeholders.
    def __query_substitutions(self, query, other=None):
        replacements = {
            "_THE_TABLE_": Config.VARIANT_TABLE,
            "_THE_EXPANDED_TABLE_": Config.EXPANDED_TABLE,
            "_PATIENT_INFO_": Config.PATIENT_INFO,
            "_GENOTYPING_TABLE_": Config.GENOTYPING_TABLE
        }
        if other is not None:
            replacements.update(other)
count = 0
for r in replacements:
count += query.count(r)
if (count == 0):
return query
for r in replacements:
query = query.replace(r, replacements[r])
# Recursively make substitutions
return self.__query_substitutions(query, other)
# Check if a query requires a main query substitution
def __main_query(self, query_file):
if query_file in Queries.MAIN_QUERY:
main_query = Queries.MAIN_QUERY[query_file]
            prepped_main = self.__prepare_query(main_query)
return prepped_main
return None
# Read raw query in from file
def __get_query(self, file):
path = os.path.join(self.query_repo, file)
query = ''
with open (path, "r") as f:
query = f.read()
return query
# Get the base name of the query file and make some replacements
def __query_name(self, query_file):
query_name = query_file.split('.')[0]
query_name = query_name.replace("-", "_")
return query_name
# Get preset cutoffs from query file
def __get_preset_cutoffs(self, query):
cutoffs = {}
if query in Queries.PRESET_CUTOFFS:
cutoffs = Queries.PRESET_CUTOFFS[query]
return cutoffs
# Determine cutoffs for methods that require cutoffs set at three standard deviations from the mean
def __three_sigma_cutoffs(self, query_file):
logging.debug("Getting average and standard deviation")
query = self.__prepare_query(query_file)
query_name = self.__query_name(query_file)
result = self.bq.run(query, query_name=query_name)
average, stddev = self.__get_average_stddev(result)
logging.debug("Average: %s, Standard Deviation: %s" % (average, stddev))
max, min = self.__calculate_max_min(average, stddev)
logging.debug("Max: %s, Min: %s" % (max, min))
substitutions = self.__create_max_min_substitutions(max, min)
return substitutions
# Get the average and standard deviation from a query result
def __get_average_stddev(self, result):
for r in result:
average = r['average']
stddev = r['stddev']
return average, stddev
# Calculate the 3 sigma rule given an average and standard deviation
def __calculate_max_min(self, average, stddev):
max = float(average) + (3 * float(stddev))
min = float(average) - (3 * float(stddev))
return max, min
# Create a dictionary of substitutions given a max and min
    def __create_max_min_substitutions(self, max, min):
        substitutions = {
            "_MAX_VALUE_": "%s" % max,
            "_MIN_VALUE_": "%s" % min,
        }
        return substitutions
# Get cutoff from prequery
def __cutoff(self, query_file):
logging.debug("Getting cutoff")
query = self.__prepare_query(query_file)
#print query
query_name = self.__query_name(query_file)
result = self.bq.run(query, query_name=query_name)
for r in result:
cutoff = r['cutoff']
substitution = {"_CUTOFF_": cutoff}
return substitution
return {}
# Add failed samples to total
def __collect_failed_samples(self, result, query):
logging.debug("Collecting failed samples.")
if result is None:
return
for r in result:
sample_id = r['sample_id']
if sample_id in self.failed_samples:
self.failed_samples[sample_id].append(query)
else:
self.failed_samples[sample_id] = [query]
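# A minimal standalone sketch of the three-sigma rule that
# __three_sigma_cutoffs/__calculate_max_min implement above, assuming plain
# numeric inputs rather than BigQuery result rows; the sample values are made up.
def three_sigma_bounds(values):
    import math
    mean = sum(values) / float(len(values))
    stddev = math.sqrt(sum((v - mean) ** 2 for v in values) / float(len(values)))
    return mean + 3 * stddev, mean - 3 * stddev

# Example: rates = [0.31, 0.32, 0.30, 0.33]; any sample whose rate falls
# outside three_sigma_bounds(rates) would be flagged, mirroring the QC cutoffs.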
|
# SensorFusion API
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
from flask import Flask, jsonify, request, render_template
app = Flask(__name__)
app.config("DEBUG") = True
#############################
# Additional code goes here #
#############################
@app.route('/')
def home_page():
example_embed='This string is from python'
return render_template('index.html', embed=example_embed)
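# A hedged sketch of what a JSON endpoint might look like here; the route name,
# expected payload, and echoed summary are illustrative assumptions, not part
# of the original API.
@app.route('/fuse', methods=['POST'])
def fuse():
    readings = request.get_json(force=True)  # e.g. {"imu": [...], "camera": [...]}
    # Placeholder: real sensor-fusion logic would combine the readings here.
    return jsonify({"received_keys": sorted(readings.keys())})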
######### run app #########
if __name__ == '__main__':
    app.run(debug=True)
|
"""
This plugin adds several convenient aliases, to shorten
roles/affiliation management.
Aliases defined
---------------
All those commands take a nick or a JID as a parameter.
For roles
~~~~~~~~~
.. glossary::
:sorted:
/visitor
/mute
Set the role to ``visitor``
/participant
Set the role to ``participant``
/moderator
/op
Set the role to ``moderator``
For affiliations
~~~~~~~~~~~~~~~~
.. glossary::
:sorted:
/admin
Set the affiliation to ``admin``
/member
/voice
Set the affiliation to ``member``
/noaffiliation
Set the affiliation to ``none``
/owner
Set the affiliation to ``owner``
"""
from poezio.plugin import BasePlugin
from poezio.tabs import MucTab
from poezio.core.structs import Completion
class Plugin(BasePlugin):
"""
Adds several convenient aliases to /affiliation and /role:
/visitor
/participant
/moderator == /op
/member == /voice
/owner
/admin
/noaffiliation
"""
def init(self):
        for role in ('visitor', 'participant', 'moderator'):
            self.api.add_tab_command(MucTab, role, self.role(role),
                    help='Set the role of a nick to %s' % role,
                    usage='<nick>',
                    short='Set the role to %s' % role,
                    completion=self.complete_nick)
for aff in ('member', 'owner', 'admin'):
self.api.add_tab_command(MucTab, aff, self.affiliation(aff),
usage='<nick>',
help='Set the affiliation of a nick to %s' % aff,
short='Set the affiliation to %s' % aff,
completion=self.complete_nick)
self.api.add_tab_command(MucTab, 'noaffiliation', self.affiliation('none'),
usage='<nick>',
help='Set the affiliation of a nick to none.',
short='Set the affiliation to none.',
completion=self.complete_nick)
self.api.add_tab_command(MucTab, 'voice', self.affiliation('member'),
usage='<nick>',
help='Set the affiliation of a nick to member.',
short='Set the affiliation to member.',
completion=self.complete_nick)
self.api.add_tab_command(MucTab, 'op', self.role('moderator'),
usage='<nick>',
help='Set the role of a nick to moderator.',
short='Set the role to moderator.',
completion=self.complete_nick)
self.api.add_tab_command(MucTab, 'mute', self.role('visitor'),
usage='<nick>',
help='Set the role of a nick to visitor.',
short='Set the role to visitor.',
completion=self.complete_nick)
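    # Each factory below returns a command handler with the role/affiliation baked
    # in: e.g. "/op nick" ends up calling command_role("nick moderator").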
def role(self, role):
return lambda args: self.api.current_tab().command_role(args+' '+role)
def affiliation(self, affiliation):
return lambda args: self.api.current_tab().command_affiliation(
args+' '+affiliation)
def complete_nick(self, the_input):
tab = self.api.current_tab()
compare_users = lambda x: x.last_talked
word_list = [user.nick for user in sorted(tab.users, key=compare_users, reverse=True)\
if user.nick != tab.own_nick]
return Completion(the_input.auto_completion, word_list, '')
|
import argparse
import pandas as pd
import matplotlib.pyplot as plt
from dialog_eval import DialogEval
from domain import Domain
parser = argparse.ArgumentParser("Test dialogue evaluation against Elasticsearch index.")
parser.add_argument("--elasticsearch-uri", default="http://localhost:9200", required=False,
help="URI of elasticsearch server. (default: %(default)s)")
parser.add_argument("--elasticsearch-index", default="dialog-eval", required=False,
help="Name of the elasticsearch index. (default: %(default)s)")
parser.add_argument("--embedding-model", default="all-MiniLM-L12-v2", required=False,
help="Name of the sentence-transformers embedding model. (default: %(default)s)")
args = parser.parse_args()
print()
print("Running with arguments:")
print(args)
print()
dialogeval = DialogEval(args.elasticsearch_uri, args.elasticsearch_index,
embedding_model=args.embedding_model)
# Example 1
context = ["Oh my god, she's got a gun!",
"Don't move, I'll blow your head off!"]
utterances = [
"Please don't kill me!",
"Ahhh! Somebody help!",
"Go ahead, shoot me.",
"Right between the eyes please."
]
utterances_in_context = [context + [u] for u in utterances]
reference_utterances = ["I don't want to die!"] * len(utterances)
scores_zero_ref = dialogeval.score_utterances(utterances_in_context)
scores_single_ref = dialogeval.score_utterances(utterances_in_context, reference_utterances)
scores_df = (
pd.DataFrame({"response": utterances, "zero_ref": scores_zero_ref, "single_ref": scores_single_ref})
)
scores_df.plot.barh(x="response").invert_yaxis()
plt.xlabel("score")
# Example 2
context = ["Hi! How are you?",
"I need a room to stay for the night."]
utterances = [
"What happened to you?",
"Sorry, I can't help you.",
"Sure, the Ridge Hotel is rated 9/10 and has free Wifi.",
]
utterances_in_context = [context + [u] for u in utterances]
scores_general = dialogeval.score_utterances(utterances_in_context, domains=[Domain.GENERAL.value])
scores_hotels = dialogeval.score_utterances(utterances_in_context, domains=[Domain.HOTELS.value])
scores_df = (
pd.DataFrame({"response": utterances, "general": scores_general, "hotels": scores_hotels})
)
scores_df.plot.barh(x="response").invert_yaxis()
plt.xlabel("score")
# Example 3
utterances_in_context = [
["Hi! How are you?",
"Where the hell have you been?",
"Working hard."]
]
scores, details = dialogeval.score_utterances(utterances_in_context, return_details=True)
details_df = (
    pd.DataFrame(details[0])[["nearest_utterances", "weighted_cosines"]]
    .sort_values(by="weighted_cosines", ascending=False)
)
print(details_df)
plt.show()
|
from reliapy.distributions.discrete._discrete import _Discrete
from reliapy.distributions.discrete._bernoulli import _Bernoulli
from reliapy.distributions.discrete._binomial import _Binomial
from reliapy.distributions.discrete._poisson import _Poisson
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
def _default_show_url(url):
import webbrowser
webbrowser.open_new_tab(url)
def prepare_browser(project, stage, io_loop, show_url):
from tornado.ioloop import IOLoop
from conda_kapsel.internal.ui_server import UIServer, UIServerDoneEvent
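    # Temporarily install a dedicated IOLoop, run the UI server until it reports a
    # UIServerDoneEvent, then restore whichever loop was current before.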
result_holder = {}
old_current_loop = None
try:
old_current_loop = IOLoop.current()
if io_loop is None:
io_loop = IOLoop()
io_loop.make_current()
if show_url is None:
show_url = _default_show_url
def event_handler(event):
if isinstance(event, UIServerDoneEvent):
result_holder['result'] = event.result
io_loop.stop()
server = UIServer(project, stage, event_handler=event_handler, io_loop=io_loop)
try:
print("# Configure the project at {url} to continue...".format(url=server.url))
show_url(server.url)
io_loop.start()
finally:
server.unlisten()
finally:
if old_current_loop is not None:
old_current_loop.make_current()
if 'result' in result_holder:
return result_holder['result']
else:
from conda_kapsel.prepare import PrepareFailure
# this pretty much only happens in unit tests.
return PrepareFailure(logs=[],
statuses=(),
errors=["Browser UI main loop was stopped."],
environ=stage.environ,
overrides=stage.overrides)
|
from tkinter import Tk, ttk, LEFT, RIGHT, BOTTOM, TOP, BOTH, Grid, \
N, S, E, W, NW, NE, Button, Radiobutton, Label, Entry, IntVar, BooleanVar, StringVar, NORMAL, DISABLED
import os
import Game
import tkinter.messagebox as msgbox
WHITE = True
BLACK = False
class GUI:
def __init__(self, physInput):
self.physInput = physInput
def dispAIMove(self, moveStr):
msg = moveStr[5:7] + ' to ' + moveStr[7:]
msg += "\nPlease move my piece for me and press OK."
msgbox.showinfo("AI Move", msg)
self.physInput.promptCamera(True)
msg = "Make your move and press OK."
msgbox.showinfo("Player Move", msg)
def getPlayerMove(self):
def giveMove():
moveDir = os.path.dirname(os.path.realpath(__file__)) + '/../phys/'
filename = moveDir + 'playerMove.txt'
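            # playerMove.txt format, inferred from how it is written here:
            # "<flag> <from-square> <to-square>", e.g. "1 A2 A3" (the leading "1"
            # appears to be a fixed ready flag for whatever consumes this file).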
output = open(filename, "w")
output.write('1 ' + iFile.get() + iRank.get() + ' ' + fFile.get() + fRank.get())
output.close()
winMove.destroy()
return
winMove = Tk()
winMove.wm_title("Player Move")
winMove.config(width=480, height=300)
frames = {}
frames['spaces'] = ttk.Frame(winMove)
frames['spaces'].pack(side=TOP, pady=10)
frames['space_i'] = ttk.LabelFrame(frames['spaces'], text='From', labelanchor=NW, width=235, height=250)
frames['space_f'] = ttk.LabelFrame(frames['spaces'], text='To', labelanchor=NW, width=235, height=250)
frames['space_i'].pack(side=LEFT)
frames['space_f'].pack(side=RIGHT)
frames['space_i'].pack_propagate(False)
frames['space_f'].pack_propagate(False)
frames['space_iF'] = ttk.Frame(frames['space_i'], width=165, height=250)
frames['space_iR'] = ttk.Frame(frames['space_i'], width=165, height=250)
frames['space_fF'] = ttk.Frame(frames['space_f'], width=165, height=250)
frames['space_fR'] = ttk.Frame(frames['space_f'], width=165, height=250)
frames['space_iF'].pack(side=LEFT)
frames['space_iR'].pack(side=RIGHT)
frames['space_fF'].pack(side=LEFT)
frames['space_fR'].pack(side=RIGHT)
iFile = StringVar()
iRank = StringVar()
iFile.set('A')
iRank.set('1')
        # Build the "from" file (A-H) and rank (1-8) radio buttons in loops.
        for letter in 'ABCDEFGH':
            Radiobutton(frames['space_iF'], text=letter, variable=iFile, value=letter).pack(anchor=NW)
        for number in '12345678':
            Radiobutton(frames['space_iR'], text=number, variable=iRank, value=number).pack(anchor=E)
fFile = StringVar()
fRank = StringVar()
fFile.set('A')
fRank.set('2')
        # Build the "to" file (A-H) and rank (1-8) radio buttons in loops.
        for letter in 'ABCDEFGH':
            Radiobutton(frames['space_fF'], text=letter, variable=fFile, value=letter).pack(anchor=NW)
        for number in '12345678':
            Radiobutton(frames['space_fR'], text=number, variable=fRank, value=number).pack(anchor=E)
frames['button'] = ttk.Frame(winMove)
frames['button'].pack()
submit = Button(frames['button'], text="Give input", command=giveMove)
        submit.pack(side=LEFT, padx=15, pady=15)
winMove.mainloop()
def promptMove(self):
def giveMove():
moveDir = os.path.dirname(os.path.realpath(__file__)) + '/../phys/'
filename = moveDir + 'playerMove.txt'
output = open(filename, "w")
output.write('1 ' + e1.get() + ' ' + e2.get())
winPrompt.destroy()
return
winPrompt = Tk()
Label(winPrompt, text="Initial Space (e.g. \"A2\")").grid(row=0)
Label(winPrompt, text="Final Space (e.g. \"A3\")").grid(row=1)
e1 = Entry(winPrompt)
e2 = Entry(winPrompt)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
Button(winPrompt, text="Give move", command=giveMove).grid(row=3, pady=5)
winPrompt.mainloop()
return
def showinfo(self, title, msg):
msgbox.showinfo(title, msg)
def guiMain():
winMain = Tk()
winMain.wm_title("UTA Chess Bot")
winMain.config(width=480, height=270)
winMain.pack_propagate(False)
winMain.resizable(False, False)
winMain.mainloop()
def guiStart():
def plSel():
if int(plColor.get()) == 1:
msg = "White goes first."
L1.config(state=NORMAL)
L2.config(state=NORMAL)
L3.config(state=NORMAL)
aiLabel.config(state=NORMAL)
elif int(plColor.get()) == 2:
msg = "Black goes second."
L1.config(state=NORMAL)
L2.config(state=NORMAL)
L3.config(state=NORMAL)
aiLabel.config(state=NORMAL)
elif int(plColor.get()) == 3:
msg = "Two-player; no AI"
L1.config(state=DISABLED)
L2.config(state=DISABLED)
L3.config(state=DISABLED)
aiLabel.config(state=DISABLED)
plLabel.config(text=msg)
def aiSel():
if int(aiLevel.get()) == 1:
msg = "AI does not look ahead."
elif int(aiLevel.get()) == 2:
msg = "AI looks one move ahead."
elif int(aiLevel.get()) == 3:
msg = "AI looks two moves ahead."
aiLabel.config(text=msg)
def startGame():
aiDepth = aiLevel.get()
playerSide = (plColor.get() == 1) #True corresponds to WHITE
if plColor.get() == 3: #Two-player game
aiDepth = 0
if playerSide or (aiDepth == 0): #WHITE; white goes first
msgbox.showinfo("First move", "WHITE, make your first move and press OK.")
useCamera = inFormat.get()
winStart.destroy()
Game.startFromGui(playerSide, aiDepth, useCamera)
guiMain()
def exitGame():
exit(0)
winStart = Tk()
winStart.wm_title("Config Game")
winStart.config(width=480, height=225)
winStart.pack_propagate(False)
#winStart.resizable(False, False)
frames = {}
frames['options'] = ttk.Frame(winStart)
frames['options'].pack(side=TOP, pady=10)
frames['color'] = ttk.LabelFrame(frames['options'], text='Player Color', labelanchor=NW, width = 155, height = 150)
frames['level'] = ttk.LabelFrame(frames['options'], text='AI Level', labelanchor=NW, width = 155, height = 150)
frames['input'] = ttk.LabelFrame(frames['options'], text='Input Format', labelanchor=NW, width=155, height=150)
frames['color'].pack(fill=BOTH, expand=True, anchor=N, side=LEFT)
frames['input'].pack(fill=BOTH, expand=True, anchor=N, side=RIGHT)
frames['level'].pack(fill=BOTH, expand=True, anchor=N, side=RIGHT)
frames['color'].pack_propagate(False)
frames['level'].pack_propagate(False)
frames['input'].pack_propagate(False)
plColor = IntVar()
plColor.set(1)
P1 = Radiobutton(frames['color'], text="White", variable=plColor, value=1, command=plSel)
P2 = Radiobutton(frames['color'], text="Black", variable=plColor, value=2, command=plSel)
P3 = Radiobutton(frames['color'], text="Two-Player", variable=plColor, value=3, command=plSel)
P1.pack(anchor=W)
P2.pack(anchor=W)
P3.pack(anchor=W)
plLabel = Label(frames['color'], text="White goes first.")
plLabel.pack(anchor=W, pady=20)
aiLevel = IntVar()
aiLevel.set(2)
L1 = Radiobutton(frames['level'], text="Level 1", variable=aiLevel, value=1, command=aiSel)
L2 = Radiobutton(frames['level'], text="Level 2", variable=aiLevel, value=2, command=aiSel)
L3 = Radiobutton(frames['level'], text="Level 3", variable=aiLevel, value=3, command=aiSel)
L1.pack(anchor=W)
L2.pack(anchor=W)
L3.pack(anchor=W)
aiLabel = Label(frames['level'], text="AI looks one move ahead.")
aiLabel.pack(anchor=W, pady=20)
inFormat = BooleanVar()
inFormat.set(False)
I1 = Radiobutton(frames['input'], text="Camera", variable=inFormat, value=True)
I2 = Radiobutton(frames['input'], text="GUI", variable=inFormat, value=False)
I1.pack(anchor=W)
I2.pack(anchor=W)
frames['start'] = ttk.Frame(winStart)
frames['start'].pack()
startButton = Button(frames['start'], text="Start Game", command=startGame)
    startButton.pack(side=LEFT, padx=15, pady=15)
exitButton = Button(frames['start'], text="Exit", command=exitGame)
exitButton.pack(side=RIGHT, pady=15)
#winStart.state('zoomed')
winStart.mainloop()
if __name__ == '__main__':
guiStart()
|
from numba import cuda
import numpy as np
from math import tan, sqrt
KB = None
PX_KB = None
SECTOR_LEN = None
NUM_SECTOR = None
FLT_EPSILON = 0.000001
# ppx, ppy, fx, fy, coeffs[0...3]
PPX = 0.0
PPY = 0.0
FX = 0.0
FY = 0.0
#COEFFS = [0.0, 0.0, 0.0, 0.0]
def configure(kb, num_sector, intrinsics=None):
global KB, PX_KB, SECTOR_LEN, NUM_SECTOR, PPX, PPY, FX, FY, COEFFS
KB = kb
PX_KB = kb[0] * kb[1]
SECTOR_LEN = int(PX_KB / 2) # TODO size properly
NUM_SECTOR = num_sector
if intrinsics is not None:
PPX = intrinsics["ppx"]
PPY = intrinsics["ppy"]
FX = intrinsics["fx"]
FY = intrinsics["fy"]
#COEFFS = intrinsics["coeffs"]
# =========== CALCULATIONS ===========
# CUDA kernel. For each thread
@cuda.jit
def encode(im, out):
x, y = cuda.grid(2)
outx = cuda.gridsize(2)[1]*x + y
#print(x, y, g[0], g[1])
x = KB[0]*x
y = KB[1]*y
# Variable length encoding tracking
word = 0
nibblesWritten = 0
# Write XY top left corner in pixel space
out[outx][0] = (y << 16) | x
outIdx = 1
i = 0
# TODO failing to write last section
while i < PX_KB:
zeros = 0
nonzeros = 0
while i < PX_KB:
ix = x + (i % KB[0])
iy = y + (i // KB[0])
z = 0
if im[ix][iy] == 0: # Or below delta
z = 1
# Stop when we've hit a new string of zeros
if zeros != 0 and nonzeros != 0 and z == 1:
break
zeros += z
nonzeros += (1-z)
i += 1
# Write out segment (zeros, nonzeros, values[])
j = i - nonzeros
while True:
if zeros != -1:
value = zeros
zeros = -1
elif nonzeros != -1:
value = nonzeros
nonzeros = -1
else:
jx = x + (j % KB[0])
jy = y + (j // KB[0])
# TODO add filtering, delta, time stuff
value = im[jx][jy]
j += 1
# Embed negative values in positive space
# sequence is 0, 1, -1, 2, -2...
value = (value << 1) ^ (value >> 15) # 16-bit values
# Write data (variable length encoding)
while True:
# Pop lower 3 bits from value
nibble = value & 0x7
value = (value >> 3)
# Indicate more data if some value remains
if value != 0:
nibble |= 0x8
# Append nibble to word
word = (word << 4) | nibble
nibblesWritten += 1
# Flush word when complete (Tegra is 64bit architecture)
if nibblesWritten == 16:
if outIdx >= PX_KB:
break
out[outx][outIdx] = word
outIdx += 1
nibblesWritten = 0
word = 0
# We're done when there's no more data
if value == 0:
break
# Check end condition after start of loop
if zeros == -1 and nonzeros == -1 and j >= i:
break
# Flush any remaining values, offsetting word to end first
word = (word << (4 * (16-nibblesWritten)))
out[outx][outIdx] = word
# Write final length in top 32 bits of first entry
out[outx][0] |= (outIdx << 32)
@cuda.jit()
def decode(enc, out, deproject=False):
encx = cuda.grid(1)
# Extract header & start on next word
x = enc[encx][0] & 0xffff
y = (enc[encx][0] >> 16) & 0xffff
i = 1
word = np.uint64(0)
nibblesWritten = 0
zeros = -1
nonzeros = -1
outIdx = 0
written = 0
bits = 61
value = 0
while True:
# Decode variable
value = 0
nibble = 0
bits = 61 # 64 - 3 bits
while True:
if i >= SECTOR_LEN:
return
if nibblesWritten == 0:
word = enc[encx][i]
i += 1
nibblesWritten = 16
nibble = word & 0xf000000000000000
value = value | (nibble << 1) >> bits
word = word << 4
nibblesWritten -= 1
bits -= 3
if not (nibble & 0x8000000000000000):
break
# Value is now assigned; reverse positive flipping from compression
value = (value >> 1) ^ -(value & 1)
# Reset counts if we've written a set of [zero, nonzero, data]
if zeros == 0 and nonzeros == 0:
return
elif zeros != -1 and nonzeros != -1 and written >= nonzeros:
zeros = -1
nonzeros = -1
written = 0
if zeros == -1:
zeros = value
outIdx += zeros
elif nonzeros == -1:
nonzeros = value
else: # written < nonzeros
if outIdx >= PX_KB:
return
if not deproject:
# Write out to image
out[x + (outIdx % KB[0])][y + (outIdx // KB[0])] = value
else: # Simple deprojection (assume no distortion model)
# Copied from https://github.com/IntelRealSense/librealsense/blob/master/include/librealsense2/rsutil.h#L67
px = float(x + (outIdx % KB[0]))
py = float(y + (outIdx // KB[0]))
offs = int((x + y*KB[0]) + outIdx)
out[offs][0] = value * (px - PPX) / FX
out[offs][1] = value * (py - PPY) / FY
out[offs][2] = value
written += 1
outIdx += 1
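# A minimal host-side sketch of the zigzag mapping that encode/decode above use
# to embed signed 16-bit values in positive space (0, -1, 1, -2, 2, ... map to
# 0, 1, 2, 3, 4, ...); plain Python, purely to illustrate the bit tricks.
def zigzag16(value):
    # Python ints make >> an arithmetic shift, matching the signed 16-bit
    # intent for in-range values.
    return (value << 1) ^ (value >> 15)

def unzigzag(value):
    return (value >> 1) ^ -(value & 1)

# e.g. [zigzag16(v) for v in (0, -1, 1, -2, 2)] == [0, 1, 2, 3, 4]
# and unzigzag(zigzag16(v)) == v for any in-range v.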
|
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import Aer, execute
from qiskit.tools.visualization import plot_state_city
from qiskit import compile
from qiskit.tools.visualization import plot_histogram
import qiskit.extensions.simulator
from qiskit.providers.aer import StatevectorSimulator
from qiskit.providers.aer import QasmSimulator
from qiskit.circuit import CompositeGate
from qiskit.circuit import Instruction
from qiskit.extensions.exceptions import ExtensionError
# ------------- functions ------------ #
# get measurement probabilities from the circuit's statevector
def plot_statevector(circ):
    simulator = Aer.get_backend('statevector_simulator')
    result = execute(circ, simulator).result()
    statevector = result.get_statevector(circ)
    # measurement probabilities are the squared magnitudes of the amplitudes
    statevector = np.abs(statevector) ** 2
    # plot_state_city(statevector, title='state vector')
    return statevector
# add measurements at the end of the circuit
def add_measurements(circ):
circ.measure(circ.qregs[0], circ.cregs[0])
return circ
# add measurements in X basis at the end of the circuit
def measure_X_basis(circ):
for qreg in circ.qregs:
circ.h(qreg)
return add_measurements(circ)
# add measurements in Y basis at the end of the circuit
def measure_Y_basis(circ):
for qreg in circ.qregs:
circ.sdg(qreg)
circ.h(qreg)
return add_measurements(circ)
# get sub-circuit
def sub_circuit(circ_in, num_gates):
# initialize output circuit
qr_out = QuantumRegister(circ_in.qregs[0].size, 'qr')
cr_out = ClassicalRegister(circ_in.cregs[0].size, 'cr')
circ_out = QuantumCircuit(qr_out, cr_out)
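    # each circ_in.data entry is an (instruction, qargs, ...) tuple; re-append the
    # first num_gates instructions (classical args, if any, are dropped here)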
for i in range(0, num_gates):
circ_out.append(circ_in.data[i][0], circ_in.data[i][1])
return circ_out
# gives the Euclidean distance between two vectors
def diff_vec(vec1, vec2):
    diff = np.linalg.norm(vec1 - vec2)
    return diff
# gives all three pairwise element-wise differences
def all_diff(vec1, vec2, vec3):
    diff_vec12 = abs(vec1 - vec2)
    diff_vec13 = abs(vec1 - vec3)
    diff_vec23 = abs(vec2 - vec3)
    return diff_vec12, diff_vec13, diff_vec23
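# A small usage sketch under the same qiskit API assumed above: build a Bell
# circuit, inspect its probabilities, then append Z-basis measurements.
# Purely illustrative; exact behavior depends on the installed qiskit version.
if __name__ == '__main__':
    qr = QuantumRegister(2, 'qr')
    cr = ClassicalRegister(2, 'cr')
    bell = QuantumCircuit(qr, cr)
    bell.h(qr[0])
    bell.cx(qr[0], qr[1])
    print(plot_statevector(bell))  # expected roughly [0.5, 0, 0, 0.5]
    add_measurements(bell)         # append measurements to every qubit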
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 RAPP
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Authors: Konstantinos Panayiotou, Manos Tsardoulias
# contact: klpanagi@gmail.com, etsardou@iti.gr
## @file ServiceController/ServiceControllerSync.py
#
# @copyright Rapp Project EU 2015
# @author Konstantinos Panayiotou, [klpanagi@gmail.com]
#
from ServiceControllerBase import *
class ServiceControllerSync(ServiceControllerBase):
""" Synchronous service controller class implementation. """
def __init__(self, service):
"""! Constructor
@param service Service - Service instance.
"""
super(ServiceControllerSync, self).__init__(service)
def run_job(self, msg, url):
"""! Run service job. Call RAPP Platform Web Service
@param msg - The CloudMsg to send
@param url - The Web Service urlpath
"""
# Unpack payload and file objects from cloud service object
payload = msg.req.make_payload()
files = msg.req.make_files()
if self._service.persistent:
resp = self._post_persistent(url, payload, files)
else:
resp = self._post_session_once(url, payload, files)
return resp
|
#
# Split each row into one record per participant.
#
# Input (columns: ID, date, name, creator, participant list):
# ID,日期,名称,创建者,参与者列表
# 1,2015-09-01,"设计师面试",HR,"小李,小王,小严"
#
# Output (columns: ID, date, name, creator, participant):
# ID,日期,名称,创建者,参与者
# 1,2015-09-01,设计师面试,HR,小李
# 1,2015-09-01,设计师面试,HR,小王
# 1,2015-09-01,设计师面试,HR,小严
import csv
source = 'event.csv'
dest = 'event_detail.csv'
def get_detail():
content = []
with open(source, 'r', encoding='utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
member_list = row['参与者列表'].split(',')
del row['参与者列表']
for member in member_list:
item = row.copy()
item["参与者"] = member
content.append(item)
return content
with open(dest, 'w', encoding='utf-8', newline='') as f:  # newline='' prevents blank rows on Windows
writer = csv.DictWriter(f, ['ID', '日期', '名称', '创建者', '参与者'])
writer.writeheader()
writer.writerows(get_detail())
|
import random
import numpy as np
import pandas
binary = [ 0.0, 1.0 ]
output_values1 = [ -1.0, 1.0 ]
output_values2 = [ -1.0, -0.33333333333333337, 0.33333333333333326, 1.0 ]
output_values4 = [-1.0, -0.8666666666666667, -0.7333333333333334, -0.6,
-0.4666666666666667, -0.33333333333333337, -0.19999999999999996,
-0.06666666666666665, 0.06666666666666665, 0.19999999999999996,
0.33333333333333326, 0.46666666666666656, 0.6000000000000001,
0.7333333333333334, 0.8666666666666667, 1.0]
def gen_lowcard_nom_dataset(n, nattr, seed, max_depth, card_range=[2, 16], ntrees=1):
np.random.seed(seed)
# generate columns
columns = []
for i in range(nattr):
card = np.random.randint(card_range[0], card_range[1])
columns.append(np.random.randint(0, card, n))
output = np.zeros(n)
for i in range(ntrees):
output += gen_output_for_columns(n, columns, max_depth)
# add some noise
#output += 0.05 * np.random.randn(n)
data = {}
for (i, col) in enumerate(columns):
data["col{}".format(i)] = col
data["output"] = output.round(3)
frame = pandas.DataFrame(data = data)
return frame
def gen_output_for_columns(n, columns, max_depth):
# simulate a decision tree to generate output
nattr = len(columns)
print("nattr", nattr);
output = np.zeros(n)
stack = []
stack.append(np.array(range(n))) # node selection
depths = [0]
node_ids = [0]
node_count = 0
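    # The three parallel lists act as one stack: for each pending node they hold
    # the row indices it covers, its depth, and its heap-style id (children of
    # node k are 2k+1 and 2k+2).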
while stack:
examples = stack.pop(-1)
depth = depths.pop(-1)
node_id = node_ids.pop(-1)
node_count += 1
print(" ITER: node_id {}, #ex {}, #stack {}, depth {}, #nodes {}".format(
node_id, len(examples), len(stack), depth, node_count))
if depth < max_depth:
if len(examples) == 0: continue
max_tries = 16
for i in range(max_tries): # try 16 times for a non-zero split
column_j = np.random.randint(0, nattr)
values = columns[column_j][examples]
split_val = values[np.random.randint(0, len(values))]
left = examples[values == split_val]
if len(left) > 0 and len(left) < len(examples): break
print(" NON-ZERO SPLIT FAIL {} col{}={}".format(i, column_j, split_val),
"zero it is" if i==max_tries-1 else "")
right = examples[values != split_val]
print("SPLIT: node_id {}, column {}, split_val {}, #left {}, #right {}".format(
node_id, column_j, split_val, len(left), len(right)))
stack.append(right)
stack.append(left) # left first
depths.append(depth + 1) # right
depths.append(depth + 1) # left
node_ids.append(2 * node_id + 2)
node_ids.append(2 * node_id + 1)
else:
#leaf_value = 2 * np.random.rand() - 1
leaf_value = random.choice(output_values1)
#leaf_value = random.choice(binary)
print(" LEAF: node_id {} value {}".format(node_id, leaf_value))
output[examples] = leaf_value
return np.array(output)
if __name__ == "__main__":
seed = 14
n = 100
attr = 4
max_depth = 3
card_range = [4, 5]
compression = False
test_frac = 0.0
ntrees = 1
#for attr in [4, 8, 16, 32, 64, 128, 256]:
compr_opt = "gzip" if compression else None
compr_ext = ".gz" if compression else ""
ftrain = "/tmp/train{:03}-{}.csv{}".format(attr, n, compr_ext)
ftest = "/tmp/test{:03}-{}.csv{}".format(attr, n, compr_ext)
frame = gen_lowcard_nom_dataset(int(n * (1.0+test_frac)), attr, seed,
max_depth=max_depth,
card_range=card_range,
ntrees=ntrees)
print(ftrain)
frame[0:n].to_csv(ftrain, header=True, index=False, compression=compr_opt)
if test_frac > 0.0:
print(ftest)
frame[n:].to_csv(ftest, header=True, index=False, compression=compr_opt)
|
"""This module contains the ``SeleniumRequest`` class"""
from scrapy import Request
from scrapy.http import HtmlResponse
from parsel.csstranslator import HTMLTranslator
from cssselect.parser import SelectorSyntaxError
from selenium.webdriver.support.ui import WebDriverWait
class SeleniumRequest(Request):
"""Scrapy ``Request`` subclass providing additional arguments"""
def __init__(self, wait_time=None, wait_until=None, screenshot=False, script=None, *args, **kwargs):
"""Initialize a new selenium request
Parameters
----------
wait_time: int
The number of seconds to wait.
wait_until: method
One of the "selenium.webdriver.support.expected_conditions". The response
will be returned until the given condition is fulfilled.
screenshot: bool
If True, a screenshot of the page will be taken and the data of the screenshot
will be returned in the response "meta" attribute.
script: str
JavaScript code to execute.
"""
self.wait_time = wait_time
self.wait_until = wait_until
self.screenshot = screenshot
self.script = script
super().__init__(*args, **kwargs)
class SeleniumResponse(HtmlResponse):
@property
def interact(self):
"""Shortcut to the WebDriver"""
return self.meta['driver']
def click(self, query):
"""Clicks on an element specified by query, and reloads the pages source into to the body"""
try:
xpath_query = HTMLTranslator().css_to_xpath(query)
except SelectorSyntaxError:
xpath_query = query
self.interact.find_element_by_xpath(xpath_query).click()
if self.request.wait_until:
WebDriverWait(self.interact, self.request.wait_time).until(self.request.wait_until)
return self.replace(url=self.interact.current_url,
body=str.encode(self.interact.page_source))
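# A hedged usage sketch, assuming the accompanying downloader middleware hands
# SeleniumResponse objects to spider callbacks; the spider name, URL, and CSS
# selector below are illustrative, not part of this module.
import scrapy

class ExampleSpider(scrapy.Spider):
    name = 'selenium_example'

    def start_requests(self):
        yield SeleniumRequest(url='https://example.com', callback=self.parse,
                              wait_time=10)

    def parse(self, response):
        # click() accepts a CSS selector and falls back to raw XPath
        next_page = response.click('a.next-page')
        yield {'title': next_page.css('title::text').get()}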
|
import os
import time
import carla
import gym
import numpy as np
import gym_carla_feature
from gym_carla_feature.start_env.misc import set_carla_transform
os.environ["SDL_VIDEODRIVER"] = "dummy"
# os.environ["DISPLAY"] = "localhost:12.0"
RENDER = False
def rule_demo(args):
from gym_carla_feature.start_env.config import params
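    # Every field below falls back to a hard-coded default when the caller's
    # args object does not carry the corresponding attribute.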
params['port'] = (args.port[0] if len(args.port) == 1 else args.port) if hasattr(args, 'port') else 2280
params['max_waypt'] = args.max_waypt if hasattr(args, 'max_waypt') else 12
params['max_step'] = args.max_step if hasattr(args, 'max_step') else 1000
params['dt'] = args.dt if hasattr(args, 'dt') else 1 / 20
params['if_noise_dt'] = args.if_noise_dt if hasattr(args, 'if_noise_dt') else False
params['noise_std'] = args.noise_std if hasattr(args, 'noise_std') else 0.01
params['noise_interval'] = args.noise_interval if hasattr(args, 'noise_interval') else 10
params['render'] = RENDER
params['autopilot'] = args.autopilot if hasattr(args, 'autopilot') else False
params['desired_speed'] = args.desired_speed if hasattr(args, 'desired_speed') else 20
params['out_lane_thres'] = args.out_lane_thres if hasattr(args, 'out_lane_thres') else 5
params['sampling_radius'] = args.sampling_radius if hasattr(args, 'sampling_radius') else 3
# params['obs_space_type'] = args.obs_space_type if hasattr(args,'obs_space_type') else ['orgin_state', 'waypoint']
params['town'] = args.town if hasattr(args, 'town') else 'Town07'
params['task_mode'] = args.task_mode if hasattr(args, 'task_mode') else 'mountainroad'
params['reward_type'] = args.reward_type if hasattr(args, 'reward_type') else 1
params['if_dest_end'] = args.if_dest_end if hasattr(args, 'if_dest_end') else False
# one_eval(args,params)
    run_eval(args, params)
def run_eval(args, params):
debug = args.debug if hasattr(args, 'debug') else False
test_num = args.test_num if hasattr(args, 'test_num') else 1000
env = gym.make('carla-v2', params=params)
from gym_carla_feature.start_env.navigation.behavior_agent import BehaviorAgent
result = {}
result['return'] = []
result['avg_r'] = []
result['avg_v'] = []
result['velocity_lon'] = []
result['velocity_lat'] = []
result['distance'] = []
result['step_num'] = []
result['delta_yaw'] = []
result['delta_steer'] = []
result['lat_distance'] = []
result['yaw_angle'] = []
result['delta_a_lon'] = []
result['delta_a_lat'] = []
result['delta_jerk_lon'] = []
result['delta_jerk_lat'] = []
result['outroute'] = []
result['collision'] = []
result['time'] = []
for ep_i in range(test_num):
ss_time = time.time()
obs = env.reset()
return_ = 0
total_time = 0
total_steps = 0
delta_yaw = 0
delta_steer = 0
a_lon = 0
a_lat = 0
jerk_lon = 0
jerk_lat = 0
lat_distance = 0
yaw_angle = 0
avg_v = 0
velocity_lon = 0
velocity_lat = 0
distance = 0
outroute = 0
collision = 0
time_info = 0
ep_info = {
'position':[],
'reward': [],
'velocity': [],
'velocity_lon': [],
'velocity_lat': [],
'delta_steer': [],
'lon_action': [],
'steer': [],
'a0': [],
'a1': [],
'lat_distance': [],
'yaw_angle': [],
'delta_yaw': [],
'delta_a_lon': [],
'delta_a_lat': [],
'delta_jerk_lon': [],
'delta_jerk_lat': [],
}
agent = BehaviorAgent(env.ego, behavior='normal')
# agent = BehaviorAgent(env.ego, behavior='cautious')
# agent = BehaviorAgent(env.ego, behavior='aggressive')
# agent.set_destination(set_carla_transform(env.route_begin).location,
# set_carla_transform(env.route_dest).location,
# clean=True)
agent.set_global_plan(env.routeplanner.get_waypoints_queue(), clean=True)
agent.update_information(env.ego, params['desired_speed'] * 3.6)
for i in range(1, env.max_step + 1):
# # top view
if RENDER:
spectator = env.world.get_spectator()
transform = env.ego.get_transform()
spectator.set_transform(carla.Transform(transform.location + carla.Location(z=40),
carla.Rotation(pitch=-90)))
act_control = agent.run_step(debug=debug)
start_time = time.time()
obs, r, done, info = env.step(act_control)
agent.update_information(env.ego, params['desired_speed'] * 3.6)
if agent.is_empty_plan():
temp_deque = env.routeplanner.get_waypoints_queue()
tem_transform = agent._local_planner.waypoints_queue[-1][0].transform
while not tem_transform == temp_deque[0][0].transform:
temp_deque.popleft()
agent.set_global_plan(temp_deque, clean=False)
avg_v += info['velocity']
velocity_lon += info['velocity_lon']
velocity_lat += abs(info['velocity_lat'])
end_time = time.time()
curr_time = end_time - start_time
# print(f"run time:{curr_time}")
total_time += curr_time
total_steps += 1
delta_yaw += abs(info['delta_yaw'])
delta_steer += abs(info['delta_steer'])
lat_distance += abs(info['lat_distance'])
yaw_angle += abs(info['yaw_angle'])
a_lon += abs(info['acc_lon'])
a_lat += abs(info['acc_lat'])
jerk_lon += abs(info['jerk_lon'])
jerk_lat += abs(info['jerk_lat'])
distance = info['distance']
outroute += info['outroute']
collision += info['collision']
time_info += info['time']
ego_location = env.ego.get_transform().location
ep_info['position'].append([ego_location.x, ego_location.y])
ep_info['reward'].append(r)
ep_info['velocity'].append(info['velocity'])
ep_info['velocity_lon'].append(info['velocity_lon'])
ep_info['velocity_lat'].append(info['velocity_lat'])
ep_info['delta_steer'].append(info['delta_steer'])
ep_info['lon_action'].append(info['lon_action'])
ep_info['steer'].append(info['steer'])
ep_info['a0'].append(info['a0'])
ep_info['a1'].append(info['a1'])
ep_info['yaw_angle'].append(info['yaw_angle'])
ep_info['lat_distance'].append(info['lat_distance'])
ep_info['delta_yaw'].append(info['delta_yaw'])
ep_info['delta_a_lon'].append(info['acc_lon'])
ep_info['delta_a_lat'].append(info['acc_lat'])
ep_info['delta_jerk_lon'].append(info['jerk_lon'])
ep_info['delta_jerk_lat'].append(info['jerk_lat'])
return_ += r
if done:
break
# print("*"*60)
# print(f"ep{ep_i} used time:{time.time()-ss_time}s")
# print("step num:", i,
# "\n return:", return_,
# "\n avg_v:", avg_v / i,
# "\n distance:", distance,
# "\n outroute:", outroute,
# "\n collision:", collision,
# "\n delta_yaw(per step):", delta_yaw / i,
# "\n delta_steer(per step):", delta_steer / i,
# "\n lat_distance(per step):", lat_distance / i,
# "\n yaw_angle(per step):", yaw_angle / i,
# "\n delta_a_lon(per step):", a_lon / i,
# "\n delta_a_lat(per step):", a_lat / i,
# "\n delta_jerk_lon(per step):", jerk_lon / i,
# "\n delta_jerk_lat(per step):", jerk_lat / i,
# )
result['step_num'].append(i)
result['return'].append(return_)
result['avg_r'].append(return_ / i)
result['avg_v'].append(avg_v / i)
result['velocity_lat'].append(velocity_lat / i)
result['velocity_lon'].append(velocity_lon / i)
result['distance'].append(distance)
result['collision'].append(collision)
result['outroute'].append(outroute)
result['delta_yaw'].append(delta_yaw / i)
result['delta_steer'].append(delta_steer / i)
result['lat_distance'].append(lat_distance / i)
result['yaw_angle'].append(yaw_angle / i)
result['delta_a_lon'].append(a_lon / i)
result['delta_a_lat'].append(a_lat / i)
result['delta_jerk_lon'].append(jerk_lon / i)
result['delta_jerk_lat'].append(jerk_lat / i)
result['time'].append(time_info)
print(f'test {test_num} episode finished !!!')
    save_path = './veh_control_logs/eval/pid'
    os.makedirs(save_path, exist_ok=True)  # the nested log directory may not exist yet
for k, vl in ep_info.items():
np.save(f'{save_path}/ep-{k}', np.array(vl))
safe_data = []
comfort_data = []
for k, vl in result.items():
print(f'{k}: <mean>{np.array(vl).mean(axis=0)} | <std>{np.array(vl).std(axis=0)}')
if k in ['outroute', 'collision']:
safe_data.append(np.array(vl))
if k in ['delta_a_lon', 'delta_a_lat', 'delta_jerk_lon', 'delta_jerk_lat']:
comfort_data.append(np.array(vl))
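    # An episode counts as unsafe if it left the route or collided; comfort
    # squashes the summed per-step acceleration/jerk magnitudes into (0, 1],
    # where higher means smoother driving.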
safe = []
    for outlane, collision in zip(safe_data[0], safe_data[1]):
        safe.append(outlane or collision)
comfort = []
for delta_a_lon, delta_a_lat, delta_jerk_lon, delta_jerk_lat in zip(comfort_data[0], comfort_data[1],
comfort_data[2], comfort_data[3]):
comfort.append((1 / (1 + (delta_a_lon + delta_a_lat + delta_jerk_lon + delta_jerk_lat))))
print(f'safe: <mean>{1 - np.array(safe).mean()} | <std>{np.array(safe).std()}')
print(f'comfort: <mean>{np.array(comfort).mean()} | <std>{np.array(comfort).std()}')
def one_eval(args, params):
debug = args.debug if hasattr(args, 'debug') else False
test_num = args.test_num if hasattr(args, 'test_num') else 1000
env = gym.make('carla-v2', params=params)
from gym_carla_feature.start_env.navigation.behavior_agent import BehaviorAgent
ss_time = time.time()
obs = env.reset()
return_ = 0
total_time = 0
total_steps = 0
delta_yaw = 0
delta_steer = 0
a_lon = 0
a_lat = 0
jerk_lon = 0
jerk_lat = 0
lat_distance = 0
yaw_angle = 0
avg_v = 0
distance = 0
outroute = 0
collision = 0
agent = BehaviorAgent(env.ego, behavior='normal')
# agent = BehaviorAgent(env.ego, behavior='cautious')
# agent = BehaviorAgent(env.ego, behavior='aggressive')
# agent.set_destination(set_carla_transform(env.route_begin).location,
# set_carla_transform(env.route_dest).location,
# clean=True)
agent.set_global_plan(env.routeplanner.get_waypoints_queue(), clean=True)
agent.update_information(env.ego, params['desired_speed'] * 3.6)
for i in range(1, env.max_step + 1):
# # top view
if RENDER:
spectator = env.world.get_spectator()
transform = env.ego.get_transform()
spectator.set_transform(carla.Transform(transform.location + carla.Location(z=40),
carla.Rotation(pitch=-90)))
act_control = agent.run_step(debug=debug)
start_time = time.time()
obs, r, done, info = env.step(act_control)
agent.update_information(env.ego, params['desired_speed'] * 3.6)
if agent.is_empty_plan():
temp_deque = env.routeplanner.get_waypoints_queue()
tem_transform = agent._local_planner.waypoints_queue[-1][0].transform
while not tem_transform == temp_deque[0][0].transform:
temp_deque.popleft()
agent.set_global_plan(temp_deque, clean=False)
end_time = time.time()
curr_time = end_time - start_time
# print(f"run time:{curr_time}")
total_time += curr_time
total_steps += 1
print("-" * 60)
print(env.r_info)
print("step :", i,
"\n return:", r,
"\n avg_v:", info['velocity'],
"\n delta_yaw(per step):", info['delta_yaw'],
"\n delta_steer(per step):", info['delta_steer'],
"\n lat_distance(per step):", info['lat_distance'],
"\n yaw_angle(per step):", info['yaw_angle'],
"\n delta_a_lon(per step):", info['acc_lon'],
"\n delta_a_lat(per step):", info['acc_lat'],
"\n delta_jerk_lon(per step):", info['jerk_lon'],
"\n delta_jerk_lat(per step):", info['jerk_lat'],
)
avg_v += info['velocity']
delta_yaw += info['delta_yaw']
delta_steer += info['delta_steer']
lat_distance += info['lat_distance']
yaw_angle += info['yaw_angle']
a_lon += info['acc_lon']
a_lat += info['acc_lat']
jerk_lon += info['jerk_lon']
jerk_lat += info['jerk_lat']
distance = info['distance']
outroute += info['outroute']
collision += info['collision']
return_ += r
if done:
break
print("*" * 60)
print(f"used time:{time.time() - ss_time}s")
print("step num:", i,
"\n return:", return_,
"\n avg_v:", avg_v / i,
"\n distance:", distance,
"\n outroute:", outroute,
"\n collision:", collision,
"\n delta_yaw(per step):", delta_yaw / i,
"\n delta_steer(per step):", delta_steer / i,
"\n lat_distance(per step):", lat_distance / i,
"\n yaw_angle(per step):", yaw_angle / i,
"\n delta_a_lon(per step):", a_lon / i,
"\n delta_a_lat(per step):", a_lat / i,
"\n delta_jerk_lon(per step):", jerk_lon / i,
"\n delta_jerk_lat(per step):", jerk_lat / i,
)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Carla RL')
# parser.add_argument('--demo_type', type=str, default='ppo2')
parser.add_argument('--debug', default=False, action="store_true", )
# parser.add_argument('--policy_type', type=str, default=None)
# parser.add_argument('--objective_type', type=str, default='clip')
# parser.add_argument('--if_ir', default=False, action="store_true", )
# parser.add_argument('--lambda_entropy', type=float, default=0.01)
# parser.add_argument('--ratio_clip', type=float, default=0.25)
# parser.add_argument('--lambda_gae_adv', type=float, default=0.97)
# parser.add_argument('--if_dest_end', default=False, action="store_true", )
# parser.add_argument('--discrete_steer', nargs='+', type=float, default=1)
parser.add_argument('--reward_type', type=int, default=12)
parser.add_argument('--port', nargs='+', type=int, default=[2050])
parser.add_argument('--desired_speed', type=float, default=12)
parser.add_argument('--max_step', type=int, default=800)
parser.add_argument('--dt', type=float, default=0.05)
parser.add_argument('--test_num', type=int, default=1)
# parser.add_argument('--town', type=str, default='Town03')
# parser.add_argument('--task_mode', type=str, default='urbanroad')
parser.add_argument('--noise_std', type=float, default=0.01)
parser.add_argument('--noise_interval', type=int, default=1)
parser.add_argument('--if_noise_dt', default=False, action="store_true", )
args = parser.parse_args()
noise_std_list = [0.0]
args.if_noise_dt = False
# noise_std_list = [0., 0.01, 0.05, 0.1]
for noise_std in noise_std_list:
args.noise_std = noise_std
print('*' * 10, noise_std, '*' * 10)
rule_demo(args)
# v_list = [9, 11, 13, 14]
# for v in v_list:
# args.desired_speed = v
# print('*' * 10, v, '*' * 10)
# rule_demo(args)
|
from django.apps import AppConfig
class ArticlesConfig(AppConfig):
default_auto_field = 'django.db.models.AutoField'
name = 'comp.articles'
verbose_name = 'Articles'
|
"""Utility code for constructing importers, etc."""
from ._bootstrap import module_for_loader
from ._bootstrap import set_loader
from ._bootstrap import set_package
|
"""traits/parser.py - Parses trait-based arguments."""
from . import traitcommon
def parse_traits(*args) -> dict:
"""Parses arguments and puts them into a dictionary."""
ratings = {}
for arg in args:
split = arg.split("=")
trait = split[0].strip()
rating = None
traitcommon.validate_trait_names(trait)
if len(split) > 2:
raise SyntaxError(f"Invalid argument: `{arg}`.")
if len(split) == 2:
rating = split[1].strip()
if not rating.isdigit():
raise SyntaxError(f"`{trait}` must be a number between 0 and 5.")
rating = int(split[1])
if not 0 <= rating <= 5:
raise ValueError(f"`{trait}` must be between 0 and 5. (Got {rating}.)")
if trait in ratings:
raise SyntaxError(f"You can only add `{trait}` once.")
ratings[trait] = rating
return ratings
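# Illustrative usage, assuming both names pass traitcommon.validate_trait_names:
#   parse_traits("Brawl=3", "Strength") -> {"Brawl": 3, "Strength": None}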
|
import logging
import sys
from pathlib import Path
LOG_PATH = Path(__file__).parent.resolve().joinpath("logs")
if not LOG_PATH.exists():
LOG_PATH.mkdir(parents=True, exist_ok=True)
LOG_LEVEL = logging.INFO
LOG_FORMAT = logging.Formatter("%(asctime)s %(name)s %(levelname)-8s %(message)s",
"%Y-%m-%d %H:%M:%S")
def create_logger(slug: str) -> logging.Logger:
"""Creates and returns a new logger."""
logger = logging.getLogger(slug)
# Return logger if already created
if logger.handlers:
return logger
logger.setLevel(LOG_LEVEL)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(LOG_FORMAT)
stream_handler.setLevel(LOG_LEVEL)
logger.addHandler(stream_handler)
log_file_path = LOG_PATH.joinpath(f"{slug}.log")
file_handler = logging.FileHandler(str(log_file_path))
file_handler.setFormatter(LOG_FORMAT)
file_handler.setLevel(LOG_LEVEL)
logger.addHandler(file_handler)
return logger
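# Illustrative: create_logger("bot") logs to stdout and to logs/bot.log;
# calling it again with the same slug returns the existing logger rather
# than attaching duplicate handlers.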
|
#!/usr/bin/env python
# coding=utf-8
"""
给定 amount 金额和coins数组的硬币,
求凑成amount金额的钱,最少需要多少枚硬币
eg.
amount = 11
coins = [1,2,5]
输出 3 (5元 *2 + 1元 *1)
"""
# 保存中间结果
map_ = {}
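# Recurrence implemented below (memoized in map_):
#   exchange(a) = min(exchange(a - c) + 1 for c in coins), exchange(0) = 0,
# with -1 signalling that an amount cannot be reached.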
def exchange(amount, coins):
    if map_.get(amount):
        return map_.get(amount)
    if amount == 0:
        return 0
    if amount < 0:
        return -1
    result = 2**32
    for i in range(len(coins)):
        sub_min = exchange(amount - coins[i], coins)
        if sub_min == -1:
            continue
        result = min(sub_min + 1, result)
    if result == 2**32:
        return -1
    map_[amount] = result
    return result
if __name__ == '__main__':
    amount = 11
    coins = [1, 2, 5]
    result = exchange(amount, coins)
print("result: {}".format(result))
|
import unittest
from collections import OrderedDict
from hamlpy.compiler import Compiler
from hamlpy.parser.attributes import read_attribute_dict
from hamlpy.parser.core import Stream, ParseException
class AttributeDictParserTest(unittest.TestCase):
@staticmethod
def _parse(text):
return read_attribute_dict(Stream(text), Compiler())
def test_read_ruby_style_attribute_dict(self):
# empty dict
stream = Stream("{}><")
assert dict(read_attribute_dict(stream, Compiler())) == {}
assert stream.text[stream.ptr:] == '><'
# string values
assert dict(self._parse("{'class': 'test'} =Test")) == {'class': 'test'}
assert dict(self._parse("{'class': 'test', 'id': 'something'}")) == {'class': 'test', 'id': 'something'}
# integer values
assert dict(self._parse("{'data-number': 0}")) == {'data-number': '0'}
assert dict(self._parse("{'data-number': 12345}")) == {'data-number': '12345'}
# float values
assert dict(self._parse("{'data-number': 123.456}")) == {'data-number': '123.456'}
assert dict(self._parse("{'data-number': 0.001}")) == {'data-number': '0.001'}
# None value
assert dict(self._parse("{'controls': None}")) == {'controls': None}
# boolean attributes
assert dict(self._parse(
"{disabled, class:'test', data-number : 123,\n foo:\"bar\"}"
)) == {'disabled': True, 'class': 'test', 'data-number': '123', 'foo': 'bar'}
assert dict(self._parse(
"{class:'test', data-number : 123,\n foo:\"bar\", \t disabled}"
)) == {'disabled': True, 'class': 'test', 'data-number': '123', 'foo': 'bar'}
# attribute name has colon
assert dict(self._parse("{'xml:lang': 'en'}")) == {'xml:lang': 'en'}
# attribute value has colon or commas
assert dict(self._parse("{'lang': 'en:g'}")) == {'lang': 'en:g'}
assert dict(self._parse(
'{name:"viewport", content:"width:device-width, initial-scale:1, minimum-scale:1, maximum-scale:1"}'
)) == {'name': 'viewport', 'content': 'width:device-width, initial-scale:1, minimum-scale:1, maximum-scale:1'}
# double quotes
assert dict(self._parse('{"class": "test", "id": "something"}')) == {'class': 'test', 'id': 'something'}
# no quotes for key
assert dict(self._parse("{class: 'test', id: 'something'}")) == {'class': 'test', 'id': 'something'}
# whitespace is ignored
assert dict(self._parse(
"{ class \t : 'test', data-number: 123 }"
)) == {'class': 'test', 'data-number': '123'}
# trailing commas are fine
assert dict(self._parse("{class: 'test', data-number: 123,}")) == {'class': 'test', 'data-number': '123'}
# attributes split onto multiple lines
assert dict(self._parse("{class: 'test',\n data-number: 123}")) == {'class': 'test', 'data-number': '123'}
# old style Ruby
assert dict(self._parse("{:class => 'test', :data-number=>123}")) == {'class': 'test', 'data-number': '123'}
# list attribute values
assert dict(self._parse(
"{'class': [ 'a', 'b', 'c' ], data-list:[1, 2, 3]}"
)) == {'class': ['a', 'b', 'c'], 'data-list': ['1', '2', '3']}
# tuple attribute values
assert dict(self._parse(
"{:class=>( 'a', 'b', 'c' ), :data-list => (1, 2, 3)}"
)) == {'class': ['a', 'b', 'c'], 'data-list': ['1', '2', '3']}
# attribute order is maintained
assert self._parse(
"{'class': 'test', 'id': 'something', foo: 'bar'}"
) == OrderedDict([('class', 'test'), ('id', 'something'), ('foo', 'bar')])
# attribute values can be multi-line Haml
assert dict(self._parse("""{
'class':
- if forloop.first
link-first
\x20
- else
- if forloop.last
link-last
'href':
- url 'some_view'
}"""
)) == {
'class': '{% if forloop.first %} link-first {% else %} {% if forloop.last %} link-last {% endif %} {% endif %}', # noqa
'href': "{% url 'some_view' %}"
}
# non-ascii attribute values
assert dict(self._parse("{class: 'test\u1234'}")) == {'class': 'test\u1234'}
def test_read_html_style_attribute_dict(self):
# html style dicts
assert dict(self._parse("()><")) == {}
assert dict(self._parse("( )")) == {}
# string values
assert dict(self._parse("(class='test') =Test")) == {'class': 'test'}
assert dict(self._parse("(class='test' id='something')")) == {'class': 'test', 'id': 'something'}
# integer values
assert dict(self._parse("(data-number=0)")) == {'data-number': '0'}
assert dict(self._parse("(data-number=12345)")) == {'data-number': '12345'}
# float values
assert dict(self._parse("(data-number=123.456)")) == {'data-number': '123.456'}
assert dict(self._parse("(data-number=0.001)")) == {'data-number': '0.001'}
# None value
assert dict(self._parse("(controls=None)")) == {'controls': None}
# boolean attributes
assert dict(self._parse(
"(disabled class='test' data-number = 123\n foo=\"bar\")"
)) == {'disabled': True, 'class': 'test', 'data-number': '123', 'foo': 'bar'}
assert dict(self._parse(
"(class='test' data-number = 123\n foo=\"bar\" \t disabled)"
)) == {'disabled': True, 'class': 'test', 'data-number': '123', 'foo': 'bar'}
# attribute name has colon
assert dict(self._parse('(xml:lang="en")')) == {'xml:lang': 'en'}
# attribute names with characters found in JS frameworks
assert dict(self._parse('([foo]="a" ?foo$="b")')) == {'[foo]': 'a', '?foo$': 'b'}
# double quotes
assert dict(self._parse('(class="test" id="something")')) == {'class': 'test', 'id': 'something'}
# list attribute values
assert dict(self._parse(
"(class=[ 'a', 'b', 'c' ] data-list=[1, 2, 3])"
)) == {'class': ['a', 'b', 'c'], 'data-list': ['1', '2', '3']}
# variable attribute values
assert dict(self._parse('(foo=bar)')) == {'foo': '{{ bar }}'}
# attribute values can be multi-line Haml
assert dict(self._parse("""(
class=
- if forloop.first
link-first
\x20
- else
- if forloop.last
link-last
href=
- url 'some_view'
)"""
)) == {
'class': '{% if forloop.first %} link-first {% else %} {% if forloop.last %} link-last {% endif %} {% endif %}', # noqa
'href': "{% url 'some_view' %}"
}
def test_empty_attribute_name_raises_error(self):
# empty quoted string in Ruby new style
with self.assertRaisesRegex(ParseException, r'Attribute name can\'t be an empty string. @ "{\'\':" <-'):
self._parse("{'': 'test'}")
# empty old style Ruby attribute
with self.assertRaisesRegex(ParseException, r'Unexpected " ". @ "{: " <-'):
self._parse("{: 'test'}")
# missing (HTML style)
with self.assertRaisesRegex(ParseException, r'Unexpected "=". @ "\(=" <-'):
self._parse("(='test')")
with self.assertRaisesRegex(ParseException, r'Unexpected "=". @ "\(foo=\'bar\' =" <-'):
self._parse("(foo='bar' ='test')")
def test_empty_attribute_value_raises_error(self):
with self.assertRaisesRegex(ParseException, r'Unexpected "}". @ "{:class=>}" <-'):
self._parse("{:class=>}")
with self.assertRaisesRegex(ParseException, r'Unexpected "}". @ "{class:}" <-'):
self._parse("{class:}")
with self.assertRaisesRegex(ParseException, r'Unexpected "\)". @ "\(class=\)" <-'):
self._parse("(class=)")
def test_unterminated_string_raises_error(self):
# on attribute key
with self.assertRaisesRegex(ParseException, r'Unterminated string \(expected \'\). @ "{\'test: 123}" <-'):
self._parse("{'test: 123}")
# on attribute value
with self.assertRaisesRegex(ParseException, r'Unterminated string \(expected "\). @ "{\'test\': "123}" <-'):
self._parse("{'test': \"123}")
def test_duplicate_attributes_raise_error(self):
with self.assertRaisesRegex(ParseException, r'Duplicate attribute: "class". @ "{class: \'test\', class: \'bar\'}" <-'): # noqa
self._parse("{class: 'test', class: 'bar'}")
with self.assertRaisesRegex(ParseException, r'Duplicate attribute: "class". @ "\(class=\'test\' class=\'bar\'\)" <-'): # noqa
self._parse("(class='test' class='bar')")
def test_mixing_ruby_and_html_syntax_raises_errors(self):
# omit comma in Ruby style dict
with self.assertRaisesRegex(ParseException, r'Expected ",". @ "{class: \'test\' f" <-'):
self._parse("{class: 'test' foo: 'bar'}")
# use = in Ruby style dict
with self.assertRaisesRegex(ParseException, r'Expected ":". @ "{class=" <-'):
self._parse("{class='test'}")
with self.assertRaisesRegex(ParseException, r'Expected "=>". @ "{:class=" <-'):
self._parse("{:class='test'}")
# use colon as assignment for old style Ruby attribute
with self.assertRaisesRegex(ParseException, r'Expected "=>". @ "{:class:" <-'):
self._parse("{:class:'test'}")
# use comma in HTML style dict
with self.assertRaisesRegex(ParseException, r'Unexpected ",". @ "\(class=\'test\'," <-'):
self._parse("(class='test', foo = 'bar')")
# use : for assignment in HTML style dict (will treat as part of attribute name)
with self.assertRaisesRegex(ParseException, r'Unexpected "\'". @ "\(class:\'" <-'):
self._parse("(class:'test')")
# use attribute quotes in HTML style dict
with self.assertRaisesRegex(ParseException, r'Unexpected "\'". @ "\(\'" <-'):
self._parse("('class'='test')")
# use => in HTML style dict
with self.assertRaisesRegex(ParseException, r'Unexpected ">". @ "\(class=>" <-'):
self._parse("(class=>'test')")
# use tuple syntax in HTML style dict
with self.assertRaisesRegex(ParseException, r'Unexpected "\(". @ "\(class=\(" <-'):
self._parse("(class=(1, 2))")
def test_unexpected_eof(self):
with self.assertRaisesRegex(ParseException, r'Unexpected end of input. @ "{:class=>" <-'):
self._parse("{:class=>")
with self.assertRaisesRegex(ParseException, r'Unexpected end of input. @ "{class:" <-'):
self._parse("{class:")
with self.assertRaisesRegex(ParseException, r'Unexpected end of input. @ "\(class=" <-'):
self._parse("(class=")
|
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Service requests (parsing, handling, etc).
"""
from mapproxy.compat import PY2, iteritems, text_type
from mapproxy.compat.modules import parse_qsl, urlparse, quote
from mapproxy.util.py import cached_property
class NoCaseMultiDict(dict):
"""
    This is a dictionary that allows case-insensitive access to values.
>>> d = NoCaseMultiDict([('A', 'b'), ('a', 'c'), ('B', 'f'), ('c', 'x'), ('c', 'y'), ('c', 'z')])
>>> d['a']
'b'
>>> d.get_all('a')
['b', 'c']
>>> 'a' in d and 'b' in d
True
"""
def _gen_dict(self, mapping=()):
"""A `NoCaseMultiDict` can be constructed from an iterable of
``(key, value)`` tuples or a dict.
"""
tmp = {}
if isinstance(mapping, NoCaseMultiDict):
for key, value in mapping.iteritems(): #pylint: disable-msg=E1103
tmp.setdefault(key.lower(), (key, []))[1].extend(value)
else:
if isinstance(mapping, dict):
itr = iteritems(mapping)
else:
itr = iter(mapping)
for key, value in itr:
tmp.setdefault(key.lower(), (key, []))[1].append(value)
return tmp
def __init__(self, mapping=()):
"""A `NoCaseMultiDict` can be constructed from an iterable of
``(key, value)`` tuples or a dict.
"""
dict.__init__(self, self._gen_dict(mapping))
def update(self, mapping=(), append=False):
"""A `NoCaseMultiDict` can be updated from an iterable of
``(key, value)`` tuples or a dict.
"""
for _, (key, values) in iteritems(self._gen_dict(mapping)):
self.set(key, values, append=append, unpack=True)
def __getitem__(self, key):
"""
Return the first data value for this key.
:raise KeyError: if the key does not exist
"""
if key in self:
return dict.__getitem__(self, key.lower())[1][0]
raise KeyError(key)
def __setitem__(self, key, value):
dict.setdefault(self, key.lower(), (key, []))[1][:] = [value]
def __delitem__(self, key):
dict.__delitem__(self, key.lower())
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def __getstate__(self):
data = []
for key, values in self.iteritems():
for v in values:
data.append((key, v))
return data
def __setstate__(self, data):
self.__init__(data)
def get(self, key, default=None, type_func=None):
"""Return the default value if the requested data doesn't exist.
If `type_func` is provided and is a callable it should convert the value,
return it or raise a `ValueError` if that is not possible. In this
case the function will return the default as if the value was not
found.
Example:
>>> d = NoCaseMultiDict(dict(foo='42', bar='blub'))
>>> d.get('foo', type_func=int)
42
>>> d.get('bar', -1, type_func=int)
-1
"""
try:
rv = self[key]
if type_func is not None:
rv = type_func(rv)
except (KeyError, ValueError):
rv = default
return rv
def get_all(self, key):
"""
        Return all values for the key as a list. Returns an empty list if
        the key doesn't exist.
"""
if key in self:
return dict.__getitem__(self, key.lower())[1]
else:
return []
def set(self, key, value, append=False, unpack=False):
"""
Set a `value` for the `key`. If `append` is ``True`` the value will be added
to other values for this `key`.
If `unpack` is True, `value` will be unpacked and each item will be added.
"""
if key in self:
if not append:
dict.__getitem__(self, key.lower())[1][:] = []
else:
dict.__setitem__(self, key.lower(), (key, []))
if unpack:
for v in value:
dict.__getitem__(self, key.lower())[1].append(v)
else:
dict.__getitem__(self, key.lower())[1].append(value)
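    # Illustrative: d.set('a', [1, 2], unpack=True) stores both items, so
    # d.get_all('a') == [1, 2]; with append=False any previous values for
    # 'a' are dropped first.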
def iteritems(self):
"""
Iterates over all keys and values.
"""
if PY2:
for _, (key, values) in dict.iteritems(self):
yield key, values
else:
for _, (key, values) in dict.items(self):
yield key, values
def copy(self):
"""
Returns a copy of this object.
"""
return self.__class__(self)
def __repr__(self):
tmp = []
for key, values in self.iteritems():
tmp.append((key, values))
return '%s(%r)' % (self.__class__.__name__, tmp)
def url_decode(qs, charset='utf-8', decode_keys=False, include_empty=True,
errors='ignore'):
"""
Parse query string `qs` and return a `NoCaseMultiDict`.
"""
tmp = []
for key, value in parse_qsl(qs, include_empty):
if PY2:
if decode_keys:
key = key.decode(charset, errors)
tmp.append((key, value.decode(charset, errors)))
else:
if not isinstance(key, text_type):
key = key.decode(charset, errors)
if not isinstance(value, text_type):
value = value.decode(charset, errors)
tmp.append((key, value))
return NoCaseMultiDict(tmp)
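# Illustrative: url_decode('Foo=1&FOO=2').get_all('foo') == ['1', '2'];
# keys are matched case-insensitively and every value is kept.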
class Request(object):
charset = 'utf8'
def __init__(self, environ):
self.environ = environ
self.environ['mapproxy.request'] = self
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
del environ['HTTP_X_SCRIPT_NAME']
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
@cached_property
def args(self):
if self.environ.get('QUERY_STRING'):
return url_decode(self.environ['QUERY_STRING'], self.charset)
else:
return {}
@property
def path(self):
path = self.environ.get('PATH_INFO', '')
if PY2:
return path
if path and isinstance(path, bytes):
path = path.decode('utf-8')
return path
def pop_path(self):
path = self.path.lstrip('/')
if '/' in path:
result, rest = path.split('/', 1)
self.environ['PATH_INFO'] = '/' + rest
else:
self.environ['PATH_INFO'] = ''
result = path
if result:
self.environ['SCRIPT_NAME'] = self.environ['SCRIPT_NAME'] + '/' + result
return result
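    # Illustrative: with PATH_INFO == '/wms/service', pop_path() returns
    # 'wms', leaves PATH_INFO == '/service' and appends '/wms' to SCRIPT_NAME.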
@cached_property
def host(self):
if 'HTTP_X_FORWARDED_HOST' in self.environ:
# might be a list, return first host only
host = self.environ['HTTP_X_FORWARDED_HOST']
host = host.split(',', 1)[0].strip()
return host
elif 'HTTP_HOST' in self.environ:
host = self.environ['HTTP_HOST']
if ':' in host:
port = host.split(':')[1]
if ((self.url_scheme, port) in (('https', '443'), ('http', '80'))):
host = host.split(':')[0]
return host
result = self.environ['SERVER_NAME']
if ((self.url_scheme, self.environ['SERVER_PORT'])
not in (('https', '443'), ('http', '80'))):
result += ':' + self.environ['SERVER_PORT']
return result
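    # Illustrative: with HTTP_X_FORWARDED_HOST == 'a.example.com, b.example.com'
    # only the first host, 'a.example.com', is returned; a default port (80/443)
    # matching the scheme is stripped from HTTP_HOST.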
@cached_property
def url_scheme(self):
scheme = self.environ.get('HTTP_X_FORWARDED_PROTO')
if not scheme:
scheme = self.environ['wsgi.url_scheme']
return scheme
@cached_property
def host_url(self):
return '%s://%s/' % (self.url_scheme, self.host)
@cached_property
def server_url(self):
return 'http://%s:%s/' % (
self.environ['SERVER_NAME'],
self.environ['SERVER_PORT']
)
@property
def script_url(self):
"Full script URL without trailing /"
return (self.host_url.rstrip('/') +
quote(self.environ.get('SCRIPT_NAME', '/').rstrip('/'))
)
@property
def server_script_url(self):
"Internal script URL"
return self.script_url.replace(
self.host_url.rstrip('/'),
self.server_url.rstrip('/')
)
@property
def base_url(self):
return (self.host_url.rstrip('/')
+ quote(self.environ.get('SCRIPT_NAME', '').rstrip('/'))
+ quote(self.environ.get('PATH_INFO', ''))
)
class RequestParams(object):
"""
This class represents key-value request parameters. It allows case-insensitive
access to all keys. Multiple values for a single key will be concatenated
    (e.g. ``layers=foo&layers=bar`` becomes ``layers: foo,bar``).
All values can be accessed as a property.
:param param: A dict or ``NoCaseMultiDict``.
"""
params = None
def __init__(self, param=None):
self.delimiter = ','
if param is None:
self.params = NoCaseMultiDict()
else:
self.params = NoCaseMultiDict(param)
def __str__(self):
return self.query_string
def get(self, key, default=None, type_func=None):
"""
Returns the value for `key` or the `default`. `type_func` is called on the
value to alter the value (e.g. use ``type_func=int`` to get ints).
"""
return self.params.get(key, default, type_func)
def set(self, key, value, append=False, unpack=False):
"""
Set a `value` for the `key`. If `append` is ``True`` the value will be added
to other values for this `key`.
If `unpack` is True, `value` will be unpacked and each item will be added.
"""
self.params.set(key, value, append=append, unpack=unpack)
def update(self, mapping=(), append=False):
"""
Update internal request parameters from an iterable of ``(key, value)``
tuples or a dict.
If `append` is ``True`` the value will be added to other values for
this `key`.
"""
self.params.update(mapping, append=append)
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("'%s' object has no attribute '%s" %
(self.__class__.__name__, name))
def __getitem__(self, key):
return self.delimiter.join(map(text_type, self.params.get_all(key)))
def __setitem__(self, key, value):
"""
Set `value` for the `key`. Does not append values (see ``MapRequest.set``).
"""
self.set(key, value)
def __delitem__(self, key):
if key in self:
del self.params[key]
def iteritems(self):
for key, values in self.params.iteritems():
yield key, self.delimiter.join((text_type(x) for x in values))
def __contains__(self, key):
return self.params and key in self.params
def copy(self):
return self.__class__(self.params)
@property
def query_string(self):
"""
The map request as a query string (the order is not guaranteed).
>>> qs = RequestParams(dict(foo='egg', bar='ham%eggs', baz=100)).query_string
>>> sorted(qs.split('&'))
['bar=ham%25eggs', 'baz=100', 'foo=egg']
"""
kv_pairs = []
for key, values in self.params.iteritems():
value = ','.join(text_type(v) for v in values)
kv_pairs.append(key + '=' + quote(value.encode('utf-8'), safe=','))
return '&'.join(kv_pairs)
def with_defaults(self, defaults):
"""
        Return a copy of these params with all non-None values from
        `defaults` written over the current values.
"""
new = self.copy()
for key, value in defaults.params.iteritems():
if value != [None]:
new.set(key, value, unpack=True)
return new
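# Illustrative: RequestParams(NoCaseMultiDict([('layers', 'foo'),
# ('layers', 'bar')]))['layers'] == 'foo,bar', since multiple values are
# joined with the ',' delimiter on access.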
class BaseRequest(object):
"""
This class represents a request with a URL and key-value parameters.
:param param: A dict, `NoCaseMultiDict` or ``RequestParams``.
:param url: The service URL for the request.
:param validate: True if the request should be validated after initialization.
"""
request_params = RequestParams
def __init__(self, param=None, url='', validate=False, http=None, dimensions=None):
self.delimiter = ','
self.http = http
if param is None:
self.params = self.request_params(NoCaseMultiDict())
else:
if isinstance(param, RequestParams):
self.params = self.request_params(param.params)
else:
self.params = self.request_params(NoCaseMultiDict(param))
self.url = url
if validate:
self.validate()
def __str__(self):
return self.complete_url
def validate(self):
pass
@property
def raw_params(self):
params = {}
for key, value in iteritems(self.params):
params[key] = value
return params
@property
def query_string(self):
return self.params.query_string
@property
def complete_url(self):
"""
The complete MapRequest as URL.
"""
if not self.url:
return self.query_string
delimiter = '?'
if '?' in self.url:
delimiter = '&'
if self.url[-1] == '?':
delimiter = ''
return self.url + delimiter + self.query_string
def copy_with_request_params(self, req):
"""
        Return a copy of this request and overwrite all param values from `req`.
Use this method for templates
(``req_template.copy_with_request_params(actual_values)``).
"""
new_params = req.params.with_defaults(self.params)
return self.__class__(param=new_params, url=self.url)
def __repr__(self):
return '%s(param=%r, url=%r)' % (self.__class__.__name__, self.params, self.url)
def split_mime_type(mime_type):
"""
>>> split_mime_type('text/xml; charset=utf-8')
('text', 'xml', 'charset=utf-8')
"""
options = None
mime_class = None
if '/' in mime_type:
mime_class, mime_type = mime_type.split('/', 1)
if ';' in mime_type:
        mime_type, options = [part.strip() for part in mime_type.split(';', 1)]
return mime_class, mime_type, options
|
"""Configuration information about NCBI's databases"""
# Used to figure out if efetch supports the start, stop, strand, and
# complexity fields
PUBLICATION_TYPE = 0
SEQUENCE_TYPE = 1
# Map from database name to database type
class DatabaseInfo:
"""stores NCBI's name for the database and its type"""
def __init__(self, db, dbtype):
self.db = db
self.dbtype = dbtype
class DatabaseDict(dict):
"""map from name to DatabaseInfo for that database name
Entries are also available through attributes like PUBMED,
OMIM, and NUCLEOTIDE.
"""
    def gettype(self, db, dbtype=None):
"""Given a database name and optional type, return the database type"""
if dbtype not in (None, SEQUENCE_TYPE, PUBLICATION_TYPE):
raise TypeError("Unknown database type: %r" % (dbtype,))
if dbtype is None:
dbtype = self[db].dbtype
return dbtype
databases = DatabaseDict()
def _add_db(x):
databases[x.db] = x
return x.db
# XXX Try these
# <option value="structure">Structure</option>
# <option value="pmc">PMC</option>
# <option value="taxonomy">Taxonomy</option>
# <option value="books">Books</option>
# <option value="geo">ProbeSet</option>
# <option value="domains">3D Domains</option>
# <option value="UniSts">UniSTS</option>
# <option value="cdd">Domains</option>
# <option value="snp">SNP</option>
# <option value="popset">PopSet</option>
databases.PUBMED = _add_db(DatabaseInfo("pubmed", 0))
databases.OMIM = _add_db(DatabaseInfo("omim", 0))
databases.JOURNALS = _add_db(DatabaseInfo("journals", 0))
databases.GENOME = _add_db(DatabaseInfo("genome", 1))
databases.NUCLEOTIDE = _add_db(DatabaseInfo("nucleotide", 1))
databases.PROTEIN = _add_db(DatabaseInfo("protein", 1))
databases.POPSET = _add_db(DatabaseInfo("popset", 1))
databases.SEQUENCES = _add_db(DatabaseInfo("sequences", 1))
databases.UNIGENE = _add_db(DatabaseInfo("unigene", 1))
databases.GENE = _add_db(DatabaseInfo("gene", 1))
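# Illustrative: databases["pubmed"].dbtype == PUBLICATION_TYPE, and
# databases.gettype("protein") == SEQUENCE_TYPE; an explicit dbtype passed
# to gettype() is validated and returned as-is.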
# Someday I want to make it easier to get a given format. I would
# rather not have to specify the retmode/rettype pair, but I don't
# know what people want from this feature, so skip for now. Plus,
# it's harder than I thought.
##class FormatInfo:
## def __init__(self, name, retmode):
## self.name = name
## self.retmode = retmode
## self.rettype = rettype
|
import os
from curve.curve import Curve
from typing import List
class EllCurveRefBuilder:
curves: List[Curve]
uRes: int
vRes: int
def __init__(self, curves: List[Curve], uRes: int, vRes: int):
self.curves = curves
self.uRes = uRes
self.vRes = vRes
    def build(self, output_path: str):
        # Remove any stale output file, then open a fresh one for writing
        if os.path.exists(output_path):
            os.remove(output_path)
        f = open(output_path, "w")
|
from everett.manager import ConfigManager, ListOf
CONFIG = ConfigManager.basic_config()
CONFIG_SHORTCUTS = CONFIG("shortcuts", parser=ListOf(str, delimiter=", "))
def _is_shortcut(pstr: str, raise_on_missing: bool = False) -> bool:
    # a full URL is not a shortcut, e.g. https://github.com/pgierz/dots
    if pstr.split(":")[0] in ("http", "https"):
        return False
    # as argument: gh:pgierz/dots
    # as config: gh=https://github.com
check_list = [shortcut.split("=")[0] for shortcut in CONFIG_SHORTCUTS]
if pstr.split(":")[0] in check_list:
return True
if raise_on_missing:
raise ValueError(f"Did not understand {pstr} in {check_list}!")
return False
def _dealias_shortcut(pstr: str) -> str:
config_shortcuts_split = [item.split("=") for item in CONFIG_SHORTCUTS]
shortcuts: dict[str, str] = {scut: url for scut, url in config_shortcuts_split}
shortcut, path_stub = pstr.split(":")
dealiased_url = shortcuts.get(shortcut)
if dealiased_url:
return f"{dealiased_url}/{path_stub}"
raise ValueError(f"Did not understand the shortcut {pstr} in {shortcuts}!")
|
import os, sys
from distutils.core import setup, Extension
from distutils.command import build_ext
py_ver = sys.version_info[:2]
include_dirs = ["/usr/local/include"]
library_dirs = ["/usr/local/lib"]
libraries = ["boost_python-py{}{}".format(*py_ver), "stdc++"]
sources = ["src/boost_lagrange.cpp"]
lagrange = Extension(
name="lagrange._lagrange",
include_dirs=include_dirs,
libraries=libraries,
library_dirs=library_dirs,
sources=sources,
extra_compile_args=['-std=c++11']
)
setup(
name="lagrange",
version="1.0.1",
description="C++ implementation of scipy.interpolate.lagrange",
author="mugwort_rc",
author_email="mugwort rc at gmail com",
url="https://github.com/mugwort-rc/boost_python_lagrange",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: C++",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
],
license="BSD",
packages=["lagrange"],
ext_modules=[lagrange],
)
|
from pyplater.utils import Props, classing
def test_classing():
data = ['fff', 'sss ee', ['aa', 'ww', ('az', 's', 'at'), 'un']]
result = classing(data)
assert result == 'fff sss ee aa ww az s at un'
result = classing(*data)
assert result == 'fff sss ee aa ww az s at un'
def test_props():
props = Props({"id": "important", "href": "https://google.com"})
assert props["id"] == "important"
assert props.href == "https://google.com"
|
from sqlalchemy import Column, Integer, String, Text
from sqlalchemy.orm import relationship
from src.database.session import Base
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
username = Column(String, unique=True, index=True)
hashed_password = Column(String)
full_name = Column(String, nullable=True, index=True)
created_at = Column(Text)
modified_at = Column(Text)
notes = relationship("Note", back_populates="author_id")
|
"""Build images only when paths specified are changed"""
import json
import re
import subprocess
with open("images.json") as f:
contents = json.loads(f.read())
def grep(string, search):
return bool(re.search(search, string))
def is_changed(files):
return grep(
subprocess.run(
"git diff $COMMIT_RANGE --name-status",
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True,
).stdout,
f"({'|'.join(files)})",
)
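# Illustrative: is_changed(["Dockerfile", "app/"]) greps the diff output for
# the alternation pattern "(Dockerfile|app/)", so a change to either path
# marks the image for rebuild.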
for image, settings in contents.items():
if is_changed(settings["files"]):
subprocess.run(f"./build.sh {image} {settings['dockerfile']}", shell=True, check=True)
|
import argparse
from distutils.util import strtobool
import json
import os
import pickle
import tensorflow as tf
from softlearning.environments.utils import get_environment_from_params
from softlearning.policies.utils import get_policy_from_variant
from softlearning.samplers import rollouts
from softlearning.misc.utils import save_video
DEFAULT_RENDER_KWARGS = {
# 'mode': 'human',
'mode': 'rgb_array',
}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('checkpoint_path',
type=str,
help='Path to the checkpoint.')
parser.add_argument('--max-path-length', '-l', type=int, default=1000)
parser.add_argument('--num-rollouts', '-n', type=int, default=10)
parser.add_argument('--render-kwargs', '-r',
type=json.loads,
default='{}',
help="Kwargs for rollouts renderer.")
parser.add_argument('--deterministic', '-d',
type=lambda x: bool(strtobool(x)),
nargs='?',
const=True,
default=True,
help="Evaluate policy deterministically.")
parser.add_argument('--use-state-estimator',
type=lambda x: bool(strtobool(x)),
default=False)
args = parser.parse_args()
return args
def simulate_policy(args):
session = tf.keras.backend.get_session()
checkpoint_path = args.checkpoint_path.rstrip('/')
experiment_path = os.path.dirname(checkpoint_path)
variant_path = os.path.join(experiment_path, 'params.pkl')
with open(variant_path, 'rb') as f:
variant = pickle.load(f)
with session.as_default():
pickle_path = os.path.join(checkpoint_path, 'checkpoint.pkl')
with open(pickle_path, 'rb') as f:
picklable = pickle.load(f)
        # import ipdb; ipdb.set_trace()  # debug breakpoint
environment_params = (
variant['environment_params']['evaluation']
if 'evaluation' in variant['environment_params']
else variant['environment_params']['training'])
if args.use_state_estimator:
environment_params['kwargs'].update({
'pixel_wrapper_kwargs': {
'pixels_only': False,
'normalize': False,
'render_kwargs': {
'width': 32,
'height': 32,
'camera_id': -1,
}
},
'camera_settings': {
'azimuth': 180,
'distance': 0.35,
'elevation': -55,
'lookat': (0, 0, 0.03),
},
})
# obs_keys = environment_params['kwargs'].pop('observation_keys')
# non_object_obs_keys = [obs_key for obs_key in obs_keys if 'object' not in obs_key]
# non_object_obs_keys.append('pixels')
# environment_params['kwargs']['observation_keys'] = tuple(non_object_obs_keys)
# if args.render_mode == 'human':
# if 'has_renderer' in environment_params['kwargs'].keys():
# environment_params['kwargs']['has_renderer'] = True
# variant['environment_params']['evaluation']['task'] = 'TurnFreeValve3ResetFree-v0'
# variant['environment_params']['evaluation']['kwargs']['reset_from_corners'] = True
# 'reward_keys': (
# 'object_to_target_position_distance_cost',
# 'object_to_target_orientation_distance_cost',
# ),
# 'swap_goal_upon_completion': False,
# }
evaluation_environment = get_environment_from_params(environment_params)
policy = (
get_policy_from_variant(variant, evaluation_environment))
policy.set_weights(picklable['policy_weights'])
dump_path = os.path.join(checkpoint_path, 'policy_params.pkl')
with open(dump_path, 'wb') as f:
pickle.dump(picklable['policy_weights'], f)
render_kwargs = {**DEFAULT_RENDER_KWARGS, **args.render_kwargs}
from softlearning.preprocessors.utils import get_state_estimator_preprocessor
state_estimator = get_state_estimator_preprocessor(
state_estimator_path='/home/justinvyu/dev/softlearning-vice/softlearning/models/state_estimators/state_estimator_fixed_antialias.h5',
num_hidden_units=256,
num_hidden_layers=2
)
sampler_kwargs = {
'state_estimator': state_estimator,
'replace_state': True,
}
with policy.set_deterministic(args.deterministic):
paths = rollouts(args.num_rollouts,
evaluation_environment,
policy,
path_length=args.max_path_length,
render_kwargs=render_kwargs,
sampler_kwargs=sampler_kwargs)
if args.render_kwargs.get('mode') == 'rgb_array':
fps = 2 // getattr(evaluation_environment, 'dt', 1/30)
for i, path in enumerate(paths):
video_save_dir = args.checkpoint_path
# video_save_dir = os.path.expanduser('/tmp/simulate_policy/')
video_save_path = os.path.join(video_save_dir, f'episode_{i}.mp4')
save_video(path['images'], video_save_path, fps=fps)
return paths
if __name__ == '__main__':
args = parse_args()
simulate_policy(args)
|
# encoding: utf-8
# module Revit.Filter calls itself Filter
# from RevitNodes,Version=1.2.1.3083,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class FilterRule(object):
""" Revit Filter Rule """
@staticmethod
def ByRuleType(type, value, parameter):
"""
ByRuleType(type: str,value: object,parameter: Parameter) -> FilterRule
Create a new Filter Rule
type: Filter Rule Type
value: Value to check
parameter: Parameter to filter
"""
pass
RuleType = None
class OverrideGraphicSettings(object):
""" Override Graphic Settings """
@staticmethod
def ByProperties(
cutFillColor,
projectionFillColor,
cutLineColor,
projectionLineColor,
cutFillPattern,
projectionFillPattern,
cutLinePattern,
projectionLinePattern,
cutLineWeight,
projectionLineWeight,
):
"""
ByProperties(cutFillColor: Color,projectionFillColor: Color,cutLineColor: Color,projectionLineColor: Color,cutFillPattern: FillPatternElement,projectionFillPattern: FillPatternElement,cutLinePattern: LinePatternElement,projectionLinePattern: LinePatternElement,cutLineWeight: int,projectionLineWeight: int) -> OverrideGraphicSettings
Create a OverrideGraphicSettings element
cutFillColor: Fill color
projectionFillColor: Projection color
cutLineColor: Cut line color
projectionLineColor: Projection line color
cutFillPattern: Cut fill pattern
projectionFillPattern: Projection fill pattern
cutLinePattern: Cut line pattern
projectionLinePattern: Projection line pattern
cutLineWeight: Cut line weight
projectionLineWeight: Projection line weight
Returns: OverrideGraphicSettings
"""
pass
class ParameterFilterElement(Element, IDisposable, IGraphicItem, IFormattable):
""" Parameter Filter Element """
@staticmethod
def ByRules(name, categories, rules):
""" ByRules(name: str,categories: IEnumerable[Category],rules: IEnumerable[FilterRule]) -> ParameterFilterElement """
pass
def SafeInit(self, *args):
"""
SafeInit(self: Element,init: Action)
Handling exceptions when calling the initializing function
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __str__(self, *args):
pass
InternalElement = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Reference to the Element
Get: InternalElement(self: ParameterFilterElement) -> Element
"""
InternalElementId = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The element id for this element
"""
IsAlive = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Is this element still alive in Revit,and good to be drawn,queried etc.
"""
InternalUniqueId = None
|
#!/usr/bin/env python3
"""
@author: Katherine Eaton
Convert treetime mugration output to auspice json.
./treetime_mugration_json.py
"""
# -----------------------------------------------------------------------#
# Modules and Packages #
# -----------------------------------------------------------------------#
import argparse # Command-line argument parsing
import os # Checking files and file extensions
import json # Write json files
from Bio import Phylo # Read and parse trees
# This program should only be called from the command-line
if __name__ != "__main__":
quit()
# -----------------------------------------------------------------------#
# Argument Parsing #
# -----------------------------------------------------------------------#
parser = argparse.ArgumentParser(
description="Convert treetime mugration output to auspice json.", add_help=True
)
# Argument groups for the program
parser.add_argument(
"--tree",
help="Path to the tree file.",
action="store",
dest="treePath",
required=True,
)
parser.add_argument(
"--json", help="Output json file.", action="store", dest="jsonPath", required=True
)
parser.add_argument(
"--trait",
help="Name of the mugration trait.",
action="store",
dest="mugTrait",
required=True,
)
parser.add_argument(
"--conf",
help="Path to the mugration confidence csv.",
action="store",
dest="mugConfPath",
required=True,
)
parser.add_argument(
"--model",
help="Path to the mugration model txt.",
action="store",
dest="mugModelPath",
required=False,
)
# Retrieve user parameters
args = vars(parser.parse_args())
tree_path = args["treePath"]
json_path = args["jsonPath"]
mug_trait = args["mugTrait"]
mug_conf_path = args["mugConfPath"]
mug_model_path = args["mugModelPath"]
filename, file_extension = os.path.splitext(tree_path)
if file_extension == ".nexus" or file_extension == ".nex":
format = "nexus"
elif file_extension == ".nwk" or file_extension == ".newick":
format = "newick"
else:
    print("Unrecognized tree file extension: {}".format(file_extension))
    exit(1)
tree = Phylo.read(tree_path, format)
output_json = open(json_path, "w")
mug_conf = open(mug_conf_path, "r")
node_dict = {"nodes": {}, "models": {}}
# Tree parsing
for c in tree.find_clades():
node_dict["nodes"][c.name] = {}
# Mug confidence parsing
mug_header_list = []
read_line = mug_conf.readline().strip()
# Parse out the header (possible mug trait values)
while read_line[0] == "#":
split_line = read_line.split(",")
for i in range(1, len(split_line)):
mug_header_list.append(split_line[i].strip())
read_line = mug_conf.readline().strip()
# Parse the rest of the mug confidence file
while read_line:
split_line = read_line.split(",")
mug_node_name = split_line[0].strip()
mug_conf_dict = {}
mug_conf_dict_name = mug_trait + "_" + "confidence"
for i in range(1, len(split_line)):
mug_conf_key = mug_header_list[i - 1]
mug_conf_val = float(split_line[i])
mug_conf_dict[mug_conf_key] = mug_conf_val
max_value_keys = [
k
for k in mug_conf_dict.keys()
if mug_conf_dict[k] == max(mug_conf_dict.values())
]
mug_trait_assign = max_value_keys[0]
# Assign the confidence dict to the node
node_dict["nodes"][mug_node_name][mug_trait] = mug_trait_assign
node_dict["nodes"][mug_node_name][mug_conf_dict_name] = mug_conf_dict
read_line = mug_conf.readline().strip()
json_string = json.dumps(
node_dict, default=lambda o: o.__dict__, sort_keys=True, indent=2
)
output_json.write(json_string)
# Cleanup
output_json.close()
mug_conf.close()
|
import unittest
from selenium import webdriver
from time import sleep
class NavigationTest(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path='/usr/bin/chromedriver')
driver = self.driver
driver.implicitly_wait(30)
driver.maximize_window()
driver.get('https://the-internet.herokuapp.com/')
driver.find_element_by_link_text('Add/Remove Elements').click()
def test_add_remove(self):
driver = self.driver
elements_added = int(input('How many elements will you add?: '))
        elements_removed = int(input('How many elements will you remove?: '))
total_elements = elements_added - elements_removed
add_button = driver.find_element_by_xpath('/html/body/div[2]/div/div/button')
sleep(3)
for i in range(elements_added):
add_button.click()
for i in range(elements_removed):
try:
                delete_button = driver.find_element_by_xpath('/html/body/div[2]/div/div/div/button[1]')
                delete_button.click()
            except Exception:
                print('You\'re trying to delete more elements than exist')
break
        if total_elements > 0:
            print(f'There are {total_elements} elements on the screen')
        else:
            print('There are no elements on the screen')
sleep(3)
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main(verbosity = 2)
|
def conv2DOutsize(size, kernel_size=5, stride=2):
return (size - (kernel_size - 1) - 1) // stride + 1
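# This is the standard no-padding, dilation-1 convolution output size,
# i.e. floor((size - kernel_size) / stride) + 1.
# Example: conv2DOutsize(84) == 40, since (84 - 4 - 1) // 2 + 1 == 40.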
|
class Solution:
"""
@param arr: an array of non-negative integers
    @return: minimum number of elements to remove, taking the largest first,
             so that their sum is at least the sum of the remaining elements
"""
def minElements(self, arr):
# write your code here
arr.sort()
res = 0
total_old, total_new = sum(arr), 0
while total_old > total_new:
res += 1
last = arr.pop()
total_old -= last
total_new += last
return res
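# Illustrative: Solution().minElements([2, 1, 3]) == 1: removing the largest
# value 3 leaves a remaining sum of 3, which no longer exceeds the removed sum.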
|
import pygame
from lge.LittleGameEngine import LittleGameEngine
from lge.Sprite import Sprite
from lge.Rectangle import Rectangle
class Platform(Sprite):
def __init__(self, x, y, dir, distance, speed):
super().__init__("platform", (x, y))
        # access to the LGE instance
self.lge = LittleGameEngine.getInstance()
        # the events we will receive
self.setOnEvents(LittleGameEngine.E_ON_UPDATE)
self.setCollider(Rectangle((0, 0), (self.getWidth(), 1)))
self.enableCollider(True)
self.setTag("plataforma")
        # my attributes
self.dir = dir
self.pixels = speed
self.distance = distance
self.travel = 0
def onUpdate(self, dt):
x, y = self.getPosition()
d = self.pixels * dt
if(self.dir == "R"):
x = x + d
elif(self.dir == "L"):
x = x - d
elif(self.dir == "D"):
y = y + d
elif(self.dir == "U"):
y = y - d
self.setPosition(x, y)
self.travel = self.travel + d
if(self.travel > self.distance):
self.travel = 0
if(self.dir == "R"):
self.dir = "L"
elif(self.dir == "L"):
self.dir = "R"
elif(self.dir == "D"):
self.dir = "U"
elif(self.dir == "U"):
self.dir = "D"
|
#!/usr/bin/env python
#
# This file is part of the Fun SDK (fsdk) project. The complete source code is
# available at https://github.com/luigivieira/fsdk.
#
# Copyright (c) 2016-2017, Luiz Carlos Vieira (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import argparse
import cv2
import glob
import csv
import numpy as np
from collections import OrderedDict
from datetime import datetime, timedelta
if __name__ == '__main__':
import sys
sys.path.append('../../')
from fsdk.features.data import FaceData, EmotionData, BlinkData
#---------------------------------------------
class VideoData:
"""
Helper class to load and present the annotation information when viewing
a video.
"""
#-----------------------------------------
def __init__(self):
"""
Class constructor.
"""
self._faces = OrderedDict()
"""
Annotation of the face (region and landmarks) detected on each frame of
the video.
"""
self._emotions = OrderedDict()
"""
Annotation of the prototypical emotions detected on each frame of the
video.
"""
self._blinks = OrderedDict()
"""
Annotation of the blink count and rate accounted on each frame of the
video.
"""
#-----------------------------------------
def load(self, annotationPath, baseFileName):
"""
Loads the video data from the annotation files.
Parameters
----------
annotationPath: str
Path where to find the annotation files.
baseFileName: str
Name of the video file to used as the base name to locate the
annotation files. The face data file is searched as
<annotationPath>/<baseFileName>-face.csv, for example.
Returns
-------
ret: bool
Indication of success or failure. Failure is due to wrong annotation
path or error reading the annotation files. In any case, a message
is printed with the reason of failure.
"""
fcFilename = '{}/{}-face.csv'.format(annotationPath, baseFileName)
emFilename = '{}/{}-emotions.csv'.format(annotationPath, baseFileName)
bkFilename = '{}/{}-blinks.csv'.format(annotationPath, baseFileName)
if not os.path.isfile(fcFilename) or not os.path.isfile(emFilename) or \
not os.path.isfile(bkFilename):
print('One or more of the annotation files ({}-*.csv) could not be '
'found in path {}'.format(baseFileName, annotationPath))
return False
print('Loading video data...')
# Read the face data of each video frame
faces = OrderedDict()
try:
file = open(fcFilename, 'r', newline='')
reader = csv.reader(file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
next(reader, None) # skip the header
for row in reader:
frameNum = int(row[0])
faces[frameNum] = FaceData()
faces[frameNum].fromList(row[1:])
except:
print('Could not read file {}'.format(fcFilename))
            return False
finally:
file.close()
# Read the emotions data of each video frame
emotions = OrderedDict()
try:
file = open(emFilename, 'r', newline='')
reader = csv.reader(file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
next(reader, None) # skip the header
for row in reader:
frameNum = int(row[0])
emotions[frameNum] = EmotionData()
emotions[frameNum].fromList(row[1:])
except Exception as e:
print('Could not read file {}'.format(emFilename))
            return False
finally:
file.close()
# Read the blinks data of each video frame
blinks = OrderedDict()
try:
file = open(bkFilename, 'r', newline='')
reader = csv.reader(file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
next(reader, None) # skip the header
for row in reader:
frameNum = int(row[0])
blinks[frameNum] = BlinkData()
blinks[frameNum].fromList(row[1:])
except:
print('Could not read file {}'.format(bkFilename))
            return False
finally:
file.close()
print('Done.')
self._faces = faces
self._emotions = emotions
self._blinks = blinks
return True
#-----------------------------------------
def draw(self, frameNum, frame):
"""
Draws the video data of the given frame number in the given image.
Parameters
----------
frameNum: int
Number of the frame whose information should be drawn.
frame: numpy.ndarray
Image where to draw the information.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
white = (255, 255, 255)
yellow = (0, 255, 255)
red = (0, 0, 255)
empty = True
# Plot the face landmarks and face distance
x = 5
y = 0
w = int(frame.shape[1]* 0.2)
try:
face = self._faces[frameNum]
empty = face.isEmpty()
face.draw(frame)
if not empty:
# Draw the header
text = 'face'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 10
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, white, thick)
y += 5
cv2.line(frame, (x,y), (x+w,y), white, 1)
# Draw the estimated distance
text = 'distance:'
size, _ = cv2.getTextSize(text, font, scale, thick)
t = size[0] + 10
y += size[1] + 10
cv2.putText(frame, text, (x+20, y), font, scale, black, glow)
cv2.putText(frame, text, (x+20, y), font, scale, white, thick)
text = '{:.2f}'.format(face.distance)
cv2.putText(frame, text, (x+20+t, y), font, scale, black, glow)
cv2.putText(frame, text, (x+20+t, y), font, scale, white, thick)
                # Draw the estimated gradient
text = 'gradient:'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 10
cv2.putText(frame, text, (x+20, y), font, scale, black, glow)
cv2.putText(frame, text, (x+20, y), font, scale, white, thick)
text = '{:.2f}'.format(face.gradient)
cv2.putText(frame, text, (x+20+t, y), font, scale, black, glow)
cv2.putText(frame, text, (x+20+t, y), font, scale, white, thick)
size, _ = cv2.getTextSize(text, font, scale, thick)
#y += size[1] + 10
except:
pass
# Plot the blink count and rate
try:
blink = self._blinks[frameNum]
if not empty:
# Draw the header
text = 'blinks'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 20
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, white, thick)
y += 5
cv2.line(frame, (x,y), (x+w,y), white, 1)
# Draw the blink count
text = 'rate (per minute):'
size, _ = cv2.getTextSize(text, font, scale, thick)
t = size[0] + 10
text = 'count:'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 10
cv2.putText(frame, text, (x+20, y), font, scale, black, glow)
cv2.putText(frame, text, (x+20, y), font, scale, white, thick)
text = '{}'.format(blink.count)
cv2.putText(frame, text, (x+20+t, y), font, scale, black, glow)
cv2.putText(frame, text, (x+20+t, y), font, scale, white, thick)
# Draw the blink rate
text = 'rate (per minute):'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 10
cv2.putText(frame, text, (x+20, y), font, scale, black, glow)
cv2.putText(frame, text, (x+20, y), font, scale, white, thick)
text = '{}'.format(blink.rate)
cv2.putText(frame, text, (x+20+t, y), font, scale, black, glow)
cv2.putText(frame, text, (x+20+t, y), font, scale, white, thick)
size, _ = cv2.getTextSize(text, font, scale, thick)
#y += size[1] + 10
except:
pass
# Plot the emotion probabilities
try:
emotions = self._emotions[frameNum]
if empty:
labels = []
values = []
else:
labels = [s.split('.')[1] for s in EmotionData.header()]
values = emotions.toList()
bigger = labels[values.index(max(values))]
# Draw the header
text = 'emotions'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 20
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, white, thick)
y += 5
cv2.line(frame, (x,y), (x+w,y), white, 1)
size, _ = cv2.getTextSize('happiness', font, scale, thick)
t = size[0] + 20
w = 150
h = size[1]
for l, v in zip(labels, values):
lab = '{}:'.format(l)
val = '{:.2f}'.format(v)
size, _ = cv2.getTextSize(l, font, scale, thick)
# Set a red color for the emotion with bigger probability
color = red if l == bigger else white
y += size[1] + 15
# Draw the outside rectangle
p1 = (x+t, y-size[1]-5)
p2 = (x+t+w, y-size[1]+h+5)
cv2.rectangle(frame, p1, p2, color, 1)
# Draw the filled rectangle proportional to the probability
p2 = (p1[0] + int((p2[0] - p1[0]) * v), p2[1])
cv2.rectangle(frame, p1, p2, color, -1)
# Draw the emotion label
cv2.putText(frame, lab, (x, y), font, scale, black, glow)
cv2.putText(frame, lab, (x, y), font, scale, color, thick)
# Draw the value of the emotion probability
cv2.putText(frame, val, (x+t+5, y), font, scale, black, glow)
cv2.putText(frame, val, (x+t+5, y), font, scale, white, thick)
except:
pass
#---------------------------------------------
def main(argv):
"""
Main entry of this script.
Parameters
------
argv: list of str
Arguments received from the command line.
"""
# Parse the command line
args = parseCommandLine(argv)
parts = os.path.split(args.videoFilename)
videoPath = parts[0]
videoName = os.path.splitext(parts[1])[0]
if not os.path.isabs(args.annotationPath):
annotationPath = '{}/{}'.format(videoPath, args.annotationPath)
else:
annotationPath = args.annotationPath
annotationPath = os.path.normpath(annotationPath)
# Load the video data
data = VideoData()
if not data.load(annotationPath, videoName):
return -1
# Load the video
video = cv2.VideoCapture(args.videoFilename)
if not video.isOpened():
print('Error opening video file {}'.format(args.videoFilename))
sys.exit(-1)
fps = int(video.get(cv2.CAP_PROP_FPS))
frameCount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
# Text settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1
thick = 1
glow = 3 * thick
# Color settings
color = (255, 255, 255)
paused = False
frameNum = 0
# Process the video input
while True:
if not paused:
start = datetime.now()
ret, img = video.read()
if ret:
frame = img.copy()
else:
paused = True
drawInfo(frame, frameNum, frameCount, paused, fps)
data.draw(frameNum, frame)
cv2.imshow(args.videoFilename, frame)
if paused:
key = cv2.waitKey(0)
else:
end = datetime.now()
delta = (end - start)
delay = int(max(1, ((1 / fps) - delta.total_seconds()) * 1000))
key = cv2.waitKey(delay)
if key == ord('q') or key == ord('Q') or key == 27:
break
elif key == ord('p') or key == ord('P'):
paused = not paused
elif key == ord('c') or key == ord('C'):
cv2.imwrite('frame.png', img)
cv2.imwrite('drawn.png', frame)
face = data._faces[frameNum]
if any(i != 0 for i in face.region):
img,_ = face.crop(img)
cv2.imwrite('cropped.png', img)
else:
try:
os.remove('cropped.png')
                except OSError:
                    pass
elif key == ord('r') or key == ord('R'):
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif paused and key == 2424832: # Left key
frameNum -= 1
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif paused and key == 2555904: # Right key
frameNum += 1
if frameNum >= frameCount:
frameNum = frameCount - 1
elif key == 2162688: # Pageup key
frameNum -= (fps * 10)
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif key == 2228224: # Pagedown key
frameNum += (fps * 10)
if frameNum >= frameCount:
frameNum = frameCount - 1
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif key == 7340032: # F1
            showHelp(args.videoFilename, frame.shape)
if not paused:
frameNum += 1
video.release()
cv2.destroyAllWindows()
#---------------------------------------------
def drawInfo(frame, frameNum, frameCount, paused, fps):
"""
Draws text info related to the given frame number into the frame image.
Parameters
----------
    frame: numpy.ndarray
        Image data where to draw the text info.
    frameNum: int
        Number of the frame for which to draw the text info.
    frameCount: int
        Total number of frames in the video.
paused: bool
Indication if the video is paused or not.
fps: int
Frame rate (in frames per second) of the video for time calculation.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
yellow = (0, 255, 255)
# Print the current frame number and timestamp
text = 'Frame: {:d}/{:d} {}'.format(frameNum, frameCount - 1,
'(paused)' if paused else '')
size, _ = cv2.getTextSize(text, font, scale, thick)
x = 5
y = frame.shape[0] - 2 * size[1]
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
timestamp = datetime.min + timedelta(seconds=(frameNum / fps))
elapsedTime = datetime.strftime(timestamp, '%H:%M:%S')
timestamp = datetime.min + timedelta(seconds=(frameCount / fps))
totalTime = datetime.strftime(timestamp, '%H:%M:%S')
text = 'Time: {}/{}'.format(elapsedTime, totalTime)
size, _ = cv2.getTextSize(text, font, scale, thick)
y = frame.shape[0] - 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
# Print the help message
text = 'Press F1 for help'
size, _ = cv2.getTextSize(text, font, scale, thick)
x = frame.shape[1] - size[0] - 5
y = frame.shape[0] - size[1] + 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
#---------------------------------------------
def showHelp(windowTitle, shape):
"""
Displays an image with helping text.
Parameters
----------
windowTitle: str
Title of the window where to display the help
shape: tuple
Height and width of the window to create the help image.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1.0
thick = 1
# Color settings
black = (0, 0, 0)
red = (0, 0, 255)
# Create the background image
    image = np.ones((shape[0], shape[1], 3), dtype=np.uint8) * 255
# The help text is printed in one line per item in this list
helpText = [
'Controls:',
'-----------------------------------------------',
'[q] or [ESC]: quits from the application.',
'[p]: toggles paused/playing the video.',
'[r]: restarts the video playback.',
'[left/right arrow]: displays the previous/next frame (only when paused).',
'[page-up/down]: rewinds/fast forwards the video by 10 seconds.',
' ',
' ',
'Press any key to close this window...'
]
# Print the controls help text
xCenter = image.shape[1] // 2
yCenter = image.shape[0] // 2
margin = 20 # between-lines margin in pixels
textWidth = 0
textHeight = margin * (len(helpText) - 1)
lineHeight = 0
for line in helpText:
size, _ = cv2.getTextSize(line, font, scale, thick)
textHeight += size[1]
textWidth = size[0] if size[0] > textWidth else textWidth
lineHeight = size[1] if size[1] > lineHeight else lineHeight
x = xCenter - textWidth // 2
y = yCenter - textHeight // 2
for line in helpText:
cv2.putText(image, line, (x, y), font, scale, black, thick * 3)
cv2.putText(image, line, (x, y), font, scale, red, thick)
y += margin + lineHeight
# Show the image and wait for a key press
cv2.imshow(windowTitle, image)
cv2.waitKey(0)
#---------------------------------------------
def parseCommandLine(argv):
"""
Parse the command line of this utility application.
This function uses the argparse package to handle the command line
arguments. In case of command line errors, the application will be
automatically terminated.
Parameters
------
argv: list of str
Arguments received from the command line.
Returns
------
object
Object with the parsed arguments as attributes (refer to the
documentation of the argparse package for details)
"""
parser = argparse.ArgumentParser(description='Shows videos with facial data'
' produced by the FSDK project frame by'
' frame.')
parser.add_argument('videoFilename',
help='Video file with faces to display. The supported '
'formats depend on the codecs installed in the '
'operating system.')
parser.add_argument('annotationPath', nargs='?',
default='../annotation/',
help='Path where to find the annotation files with the '
'FSDK data related to the video. The default is '
'\'..\\annotation\' (relative to the path of the video '
'opened).'
)
parser.add_argument('-s', '--start',
type=int,
default=0,
help='Number of the frame from where to start the '
'video playback. The default is 0.'
)
    return parser.parse_args(argv)
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:])
|
#!/usr/bin/python
import pandas as pd
import sys
sys.path.append('../')
from Binance import Binance
import logging.config
import logging.handlers
import logging
import os
# this logging configuration is sketchy
binance = logging.getLogger(__name__)
logging.config.fileConfig('logging.ini')
# create Binance object
bn = Binance()
# set keys
bn.setSecretKey('NhqPtmdSJYdKjVHjA7PZj4Mge3R5YNiP1e3UZjInClVN65XAbvqqM6A7H5fATj0j')
bn.setAPIKey('vmPUZE6mv9SD5VNHk4HlWFsOr6aKE2zvsw0MuIgwCIPy6utIco14y7Ju91duEh8A')
# getAccountInfo
print "---------------- getAccountInfo --------------"
print "################################# POSITIVE TESTS (returns 1 or r) ###################"
queryParams = {}
print "****test empty inputs, timestamp autogenerated"
test = bn.getAccountInfo(queryParams)
print
queryParams = {'timestamp':1507770491000}
print "****test timestamp supplied"
test = bn.getAccountInfo(queryParams)
print
queryParams = {'recvWindow':123435234}
print "****test timestamp autogenerated, optional params"
test = bn.getAccountInfo(queryParams)
print
queryParams = {'timestamp':1507770491000,'recvWindow':123435234}
print "****test valid timestamp supplied, optional params"
test = bn.getAccountInfo(queryParams)
print
print "################################# NEGATIVE TESTS (returns 0) ###################"
print
queryParams = {'timestamp':12.5}
print "****test valid mandatory inputs present with invalid type"
test = bn.getAccountInfo(queryParams)
print
queryParams = {'recvWindow':'123456778','timestamp':150774295}
print "****test invalid user proved timestamp, plus some valid optional"
test = bn.getAccountInfo(queryParams)
print
queryParams = {'timestamp':'abcdefghijklm'}
print "****test invalid user proved timestamp type but length ok, plus some optional"
test = bn.getAccountInfo(queryParams)
print
queryParams = {'sharkbite':'abcdefghijklm'}
print "****test random input invalid value"
test = bn.getAccountInfo(queryParams)
print
queryParams = {'sharkFLOAT':10.11}
print "****test random input invalid value"
test = bn.getAccountInfo(queryParams)
print
queryParams = {'sharkINTEGER':10}
print "****test random input invalid value"
test = bn.getAccountInfo(queryParams)
|
from lib import action
class KeycloakGroupCreateAction(action.KeycloakBaseAction):
    def run(self, name, path, clientRoles=None, realmRoles=None, subGroups=None,
            parent=None, skip_exists=True):
        # Use None defaults to avoid mutable default arguments shared across calls
        payload = {}
        payload['name'] = name
        payload['path'] = path
        payload['clientRoles'] = clientRoles if clientRoles is not None else {}
        payload['realmRoles'] = realmRoles if realmRoles is not None else []
        payload['subGroups'] = subGroups if subGroups is not None else {}
self.keycloak_admin.create_group(payload=payload, parent=parent, skip_exists=skip_exists)
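# Usage sketch (hypothetical values; the action is normally invoked by the
# StackStorm runner): run(name='developers', path='/developers') creates a
# top-level "developers" group, skipping creation if it already exists.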
|
from setuptools import setup, find_packages
setup(
name='synthpop',
version='0.1.1',
description='Population Synthesis',
author='UrbanSim Inc.',
author_email='udst@urbansim.com',
license='BSD',
url='https://github.com/udst/synthpop',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
packages=find_packages(exclude=['*.tests']),
install_requires=[
'census>=0.5',
'numexpr>=2.3.1',
        'numpy>=1.16.5',
'pandas>=0.15.0',
'scipy>=0.13.3',
'us>=0.8'
]
)
|
from back.mongo.data.collect.metas.model import *
from back.mongo.data.collect.metas.mongo import *
|
# Generated by Django 2.2.13 on 2020-08-02 10:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stats', '0012_auto_20190427_0202'),
]
operations = [
migrations.AlterField(
model_name='gamesession',
name='server',
field=models.BigIntegerField(verbose_name='server'),
),
migrations.AlterField(
model_name='loggedmessage',
name='discord_id',
field=models.BigIntegerField(unique=True),
),
migrations.AlterField(
model_name='loggedmessage',
name='server',
field=models.BigIntegerField(verbose_name='server'),
),
]
|
#!/usr/bin/python3
import sys
def main():
# usage: ./html_gen.py
# input: a list of names (in any format) and then an email address
# (separated by a ' - ')
# output: some html containing contact information for a given list of names
# html is dependent on an existing file with '{}' somewhere in it.
# content will be placed at the position of '{}'
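    # Example (hypothetical data): the stdin line
    #   "Ada Lovelace - ada@example.com"
    # becomes "<tr><td>Ada Lovelace</td><td>ada@example.com</td></tr>",
    # spliced in at the '{}' marker of ./public/employees.html.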
DELIMITER = " - "
    with open("./public/employees.html", "r") as html_file:
        employees_html = html_file.read().split('{}')
print(employees_html[0].strip())
for line in sys.stdin:
name, email = line.strip().split(DELIMITER)
html_line = f"<tr><td>{name}</td><td>{email}</td></tr>"
print(html_line)
print(employees_html[1][1:])
if __name__ == "__main__": main()
|
from chat.views import handle_chat, index_greet
def setup_routes(app):
app.router.add_get('/ws/hi/', index_greet)
app.router.add_get('/ws/chats/{chat_id}', handle_chat)
|
#!/usr/bin/env python3
from __future__ import (absolute_import, division, print_function)
from datetime import datetime, timedelta
import json
import os
import requests # python-requests
import smtplib
import sys
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Some links about html formatting email
# https://css-tricks.com/using-css-in-html-emails-the-real-story/
# http://templates.mailchimp.com/?_ga=1.192270638.523823784.1476384638
# global variables are bad
jobsAll = {}
COLORS = {
'blue': 'black',
'yellow': 'yellow',
'red': 'red',
'disabled': 'grey',
'aborted': 'grey',
'': 'black',
'SUCCESS': 'black',
'FAILURE': 'red',
'UNSTABLE': 'yellow',
'ABORTED': 'grey',
'DISABLED': 'grey',
'NEVER': 'grey'
}
def buildResultToHtml(result):
return '<font color=\'{}\'>{}</font>'.format(COLORS[result], result)
def timestampToHtml(timestamp):
date = timestamp.split('T')[0]
return '<time datetime="{}">{}</time>'.format(timestamp, date)
class BuildJob:
def __init__(self, url, prev):
params = {}
# convert known status to something more usable
known_status = {}
for key in prev.keys():
known_status[int(key)] = prev[key]
keys = list(known_status.keys())
keys.sort()
if len(keys) > 0:
self.numberLast = keys[-1]
self.resultLast = known_status[self.numberLast]
else:
self.numberLast = ''
self.resultLast = ''
if not url.startswith(JOB_URL):
url = os.path.join(JOB_URL, url)
self.urlJob = url
if not url.endswith('api/json'):
url = os.path.join(url, 'api/json')
req = requests.get(url, timeout=15., params=params)
if req.status_code != 200:
raise RuntimeError('failed to get information from %s' % url)
self.name = req.json()['displayName']
self.color = req.json()['color']
lastCompletedBuild = req.json()['lastCompletedBuild']
if lastCompletedBuild is None: # set as unset and return early
self.number = 0
self.urlBuild = None
self.result = 'NEVER'
return
self.number = lastCompletedBuild['number']
self.urlBuild = lastCompletedBuild['url']
url = self.urlBuild + '/api/json'
if self.number in known_status.keys():
self.result = known_status[self.number]
else:
req = requests.get(url, timeout=15., params=params)
if req.status_code != 200:
raise RuntimeError('failed to get information from %s' % url)
self.result = req.json()['result']
# time stamp is in client's local timezone
timestamp = datetime.fromtimestamp(req.json()['timestamp'] / 1000.) # convert to correct units
utc_offset = time.altzone if time.localtime().tm_isdst else time.timezone
utc_offset = timedelta(seconds=-utc_offset)
timestamp += utc_offset
self.timestamp = '{}Z'.format(timestamp.replace(microsecond=0).isoformat())
def show(self):
if self.result == 'SUCCESS' and \
self.result == self.resultLast:
return False
return True
def __str__(self):
return '%35s %6s %8s %6d %8s' % (self.name, str(self.numberLast), self.resultLast, self.number, self.result)
def __statusToHtml(url, number, result):
number = '<a href=\'%s\'>%d</a>' % (url, number)
result = buildResultToHtml(result)
return '<td>%s</td><td>%s</td>' % (number, result)
def toHtml(self, bgcolor=None):
name = '<td><a href=\'%s\'>%s</a></td>' % (self.urlJob, self.name)
if len(self.resultLast) > 0 and self.number != self.numberLast:
jobUrl = self.urlBuild.replace(str(self.number), str(self.numberLast))
previous = BuildJob.__statusToHtml(jobUrl, self.numberLast, self.resultLast)
else:
previous = '<td COLSPAN=\'2\'> </td>'
current = BuildJob.__statusToHtml(self.urlBuild, self.number, self.result)
cells = [name, previous, current]
result = '<tr'
if bgcolor is not None:
result += ' bgcolor=\'%s\'' % bgcolor
result += '>%s</tr>\n' % '\n'.join(cells)
return result
class JobsList:
def __init__(self, name, prev={}, jobs=None):
self.name = name
self.prev = prev.get(name, {})
if jobs is not None:
self.jobs = jobs
else:
url = name.replace(' ', '%20')
if not url.startswith(VIEW_URL):
url = os.path.join(VIEW_URL, url)
self.__jobsFromView(url)
def __jobsFromView(self, url):
if not url.endswith('api/json'):
url = os.path.join(url, 'api/json')
req = requests.get(url, timeout=15.)
if req.status_code != 200:
raise RuntimeError('failed to get information from %s' % url)
self.jobs = [job['name'] for job in req.json()['jobs']]
self.jobs.sort()
def __getJob(self, name):
if name in jobsAll:
job = jobsAll[name]
else:
prev = self.prev.get(name, {})
job = BuildJob(name, prev)
jobsAll[job.name] = job
return job
def __str__(self):
text = self.name + '\n'
text += '-' * len(self.name) + '\n'
for name in self.jobs:
job = self.__getJob(name)
if job.show():
text += str(job) + '\n'
return text
def toHtml(self):
text = '<tr><th bgcolor="#424242" COLSPAN="5"><b><font color="#EEEEEE">'
text += '{}</font></b></th></tr>\n'.format(self.name)
i = 0
for name in self.jobs:
job = self.__getJob(name)
if job.show():
if i % 2:
text += job.toHtml('#E6E6E6')
else:
text += job.toHtml()
i += 1
return text
def toDict(self):
result = {}
for name in self.jobs:
job = self.__getJob(name)
result[name] = {job.number: job.result}
return result
# ################### read in the configuration
configfile = os.path.expanduser('~/.build-list.config')
if len(sys.argv) == 2:
configfile = sys.argv[1]
if not os.path.exists(configfile):
print('Did not find configuration file "{}"'.format(configfile))
    print('Either supply one as an argument or create the default one')
sys.exit(-1)
print('Loading configuration from \'%s\'' % configfile)
with open(configfile, 'r') as handle:
lines = handle.readlines()
lines = [line.strip() for line in lines]
(BASE_URL, email_from, email_to, email_smtp) = lines
print(' base_url =', BASE_URL)
print(' from =', email_from)
print(' to =', email_to) # can separate addresses with ';'
print(' smtp =', email_smtp)
VIEW_URL = os.path.join(BASE_URL, 'view')
JOB_URL = os.path.join(BASE_URL, 'job')
# ################### load the last round of information
last_file = os.path.expanduser('~/.build-list.json')
if os.path.exists(last_file):
print('Loading last known states from \'%s\'' % last_file)
with open(last_file, 'r') as handle:
last_dict = json.load(handle)
else:
last_dict = {}
DEPLOY_JOBS = [
'isis_task_copy_mantidnightly_rpm', 'ornl_task_copy_mantidnightly_rpm', 'isis_task_copy_mantidnightly_deb',
'master_create_conda_linux_pkgs'
]
# ################### generate the report
print('Collecting information about jobs')
msg_dict = {}
msg_text = ''
msg_html = '<html><head></head><body>\n'
msg_html += '<ul>\n'
for name in DEPLOY_JOBS:
job = BuildJob(name, {})
msg_text += '{} {} {}\n'.format(job.timestamp, job.result, job.name)
    msg_html += '<li>{} {} <a href="{}">{}</a></li>\n'.format(timestampToHtml(job.timestamp),
                                                              buildResultToHtml(job.result), job.urlJob, job.name)
msg_html += '</ul>\n\n'
msg_html += '<table>\n'
jobsList = JobsList('Critical Jobs', last_dict)
msg_text += str(jobsList)
msg_html += jobsList.toHtml()
msg_dict[jobsList.name] = jobsList.toDict()
jobsList = JobsList('Master Pipeline', last_dict)
msg_text += str(jobsList)
msg_html += jobsList.toHtml()
msg_dict[jobsList.name] = jobsList.toDict()
jobsList = JobsList('Static Analysis', last_dict)
msg_text += str(jobsList)
msg_html += jobsList.toHtml()
msg_dict[jobsList.name] = jobsList.toDict()
jobsList = JobsList('Release Pipeline', last_dict)
msg_text += str(jobsList)
msg_html += jobsList.toHtml()
msg_dict[jobsList.name] = jobsList.toDict()
jobs = []
if len(jobs) > 0:
jobsList = JobsList('Builds of Interest', last_dict, jobs)
msg_text += str(jobsList)
msg_html += jobsList.toHtml()
msg_dict[jobsList.name] = jobsList.toDict()
msg_html += '</table>\n'
msg_html += '<p>Jobs that succeeded the last two times they were ' \
'run are not displayed</p>\n'
msg_html += '</body></html>'
# ################### save the last round of information
print('Saving last known states to \'%s\'' % last_file)
with open(last_file, 'w') as handle:
json.dump(msg_dict, handle)
# ################### debug print message
print('********************')
print(msg_text)
print('********************')
# print(msg_html)
# print('********************')
# ################### send the email
print('Sending email')
msg = MIMEMultipart('alternative')
msg['Subject'] = 'Summary of build status'
msg['From'] = email_from
msg['To'] = email_to
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(msg_text, 'plain')
part2 = MIMEText(msg_html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
# Send the message via local SMTP server.
with smtplib.SMTP(email_smtp) as s:
# sendmail function takes 3 arguments: sender's address,
# recipient's address and message to send - here it is
# sent as one string.
s.sendmail(email_from, email_to, msg.as_string())
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 Ronald Seoh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backported Keras Layers from the HuggingFace library.
TFF currently does not support Keras subclass models. Hence we take the individual layers within
the original Huggingface models and put them together again as a Keras functional model.
However, apart from the layers decorated with @keras_serializable (mostly the main layer part),
the head layers cannot be directly imported as they use their own config object to initialize
each layer.
Probably not the best solution, but for now we've decided to backport the relevant Keras layer
classes from Huggingface here and make them Keras serializable by directly feeding in relevant
parameters to the __init__().
"""
import tensorflow as tf
import transformers
class StandaloneTFMobileBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self,
hidden_size, hidden_act, initializer_range, layer_norm_eps,
**kwargs):
super().__init__(**kwargs)
# Let's store all the inputs first
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.dense = tf.keras.layers.Dense(
self.hidden_size,
kernel_initializer=transformers.modeling_tf_utils.get_initializer(self.initializer_range),
name="dense")
if isinstance(hidden_act, str):
self.transform_act_fn = transformers.activations_tf.get_tf_activation(self.hidden_act)
else:
self.transform_act_fn = self.hidden_act
self.LayerNorm = transformers.modeling_tf_mobilebert.TFLayerNorm(
self.hidden_size, epsilon=self.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def get_config(self):
return {
"hidden_size": self.hidden_size,
"hidden_act" : self.hidden_act,
"initializer_range": self.initializer_range,
"layer_norm_eps": self.layer_norm_eps,
}
class StandaloneTFMobileBertLMPredictionHead(tf.keras.layers.Layer):
def __init__(self,
hidden_size, hidden_act, initializer_range, layer_norm_eps,
vocab_size, embedding_size,
**kwargs):
super().__init__(**kwargs)
# Let's store all the inputs first
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.transform = StandaloneTFMobileBertPredictionHeadTransform(
self.hidden_size,
self.hidden_act,
self.initializer_range,
self.layer_norm_eps,
name="transform"
)
def build(self, input_shape):
self.bias = self.add_weight(
shape=(self.vocab_size,),
initializer="zeros",
trainable=True,
name="bias")
self.dense = self.add_weight(
shape=(self.hidden_size - self.embedding_size, self.vocab_size),
initializer="zeros",
trainable=True,
name="dense/weight",
)
self.decoder = self.add_weight(
shape=(self.vocab_size, self.embedding_size),
initializer="zeros",
trainable=True,
name="decoder/weight",
)
super().build(input_shape)
def call(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = tf.matmul(hidden_states, tf.concat([tf.transpose(self.decoder), self.dense], axis=0))
hidden_states = hidden_states + self.bias
return hidden_states
def get_config(self):
return {
"hidden_size": self.hidden_size,
"hidden_act" : self.hidden_act,
"initializer_range": self.initializer_range,
"layer_norm_eps": self.layer_norm_eps,
"vocab_size": self.vocab_size,
"embedding_size": self.embedding_size,
}
class StandaloneTFMobileBertMLMHead(tf.keras.layers.Layer):
def __init__(self,
hidden_size, hidden_act, initializer_range, layer_norm_eps,
vocab_size, embedding_size,
**kwargs):
super().__init__(**kwargs)
# Let's store all the inputs first
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.predictions = StandaloneTFMobileBertLMPredictionHead(
self.hidden_size, self.hidden_act, self.initializer_range, self.layer_norm_eps,
self.vocab_size, self.embedding_size, name="predictions")
def call(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
def get_config(self):
return {
"hidden_size": self.hidden_size,
"hidden_act" : self.hidden_act,
"initializer_range": self.initializer_range,
"layer_norm_eps": self.layer_norm_eps,
"vocab_size": self.vocab_size,
"embedding_size": self.embedding_size,
}
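# A minimal usage sketch (hypothetical sizes; the real values come from the
# pretrained MobileBERT config, e.g. hidden_size=512, embedding_size=128,
# vocab_size=30522):
#   mlm_head = StandaloneTFMobileBertMLMHead(
#       hidden_size=512, hidden_act="relu", initializer_range=0.02,
#       layer_norm_eps=1e-12, vocab_size=30522, embedding_size=128,
#       name="mlm___cls")
#   prediction_scores = mlm_head(sequence_output)  # [batch, seq_len, vocab_size]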
|
import torch.nn as nn
class LeNet(nn.Module):
def __init__(self, num_classes=10, num_channels=3):
super(LeNet, self).__init__()
act = nn.Sigmoid
self.body = nn.Sequential(
nn.Conv2d(num_channels, 12, kernel_size=5, padding=5//2, stride=2),
act(),
nn.Conv2d(12, 12, kernel_size=5, padding=5//2, stride=2),
act(),
nn.Conv2d(12, 12, kernel_size=5, padding=5//2, stride=1),
act(),
nn.Conv2d(12, 12, kernel_size=5, padding=5//2, stride=1),
act(),
)
self.fc = nn.Sequential(
nn.Linear(768, num_classes),
#act(),
#nn.Linear(256, 100)
)
self.feature = None
def forward(self, x):
out = self.body(x)
self.feature = out.view(out.size(0), -1)
# print(out.size())
out = self.fc(self.feature)
return out
def extract_feature(self):
return self.feature
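# Usage sketch: with a CIFAR-10-sized input (3x32x32), the two stride-2 convs
# reduce 32x32 to 8x8, so the flattened feature has 12*8*8 = 768 elements,
# matching nn.Linear(768, num_classes).
#   net = LeNet(num_classes=10, num_channels=3)
#   out = net(torch.randn(1, 3, 32, 32))  # shape: (1, 10)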
|
# Note: After all the names add the word STOP on the next row in the first column
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import pandas as pd
# Initialize firebase admin sdk
cred = credentials.Certificate('path-to-acm-core-json')
firebase_admin.initialize_app(cred)
# read excel
officer_doc_path = 'path-to-officer-spreadsheet'
df = pd.read_excel(officer_doc_path, header=0)
# Initialize firestore
db = firestore.client()
for index, entry in df.iterrows():
# Only read until where valid data is present
if(entry['First Name'] == 'STOP'):
break
# search up officer that has the correct first + last name
query_results = db.collection(u'officer').where(u'name', u'==', entry['First Name'] + " " + entry['Last Name']).get()
# save linkedin and email fields to variables
linkedin_url = entry['LinkedIn Profile URL']
email = entry['Personal Email']
# Check whether query returned results and if so write to that document
if (len(query_results) > 0):
doc_ref = query_results[0]
db.collection(u'officer').document(doc_ref.id).update({u'linkedin': linkedin_url, u'email': email})
|
def lengthOfLastWord_(s):
words = s.split()
return 0 if len(words) == 0 else len(words[-1])
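# Minimal examples:
#   lengthOfLastWord_("Hello World")  ->  5
#   lengthOfLastWord_("   ")          ->  0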
|
from setuptools import setup
from os import path
import re
def packagefile(*relpath):
return path.join(path.dirname(__file__), *relpath)
def read(*relpath):
with open(packagefile(*relpath)) as f:
return f.read()
def get_version(*relpath):
match = re.search(
r'''^__version__ = ['"]([^'"]*)['"]''',
read(*relpath),
re.M
)
if not match:
raise RuntimeError('Unable to find version string.')
return match.group(1)
setup(
name='toolwrapper',
version=get_version('src', 'toolwrapper.py'),
description='A base class for wrapping text-processing tools',
long_description=read('README.rst'),
url='https://github.com/luismsgomes/toolwrapper',
author='Luís Gomes',
author_email='luismsgomes@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Text Processing :: Filters',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
],
keywords='subprocess text tool wrapper',
package_dir={'': 'src'},
py_modules=['toolwrapper'],
)
|
#!/usr/bin/python
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""what-broke.py - figure out what requirements change likely broke us.
Monday morning, 6am. Loading up zuul status page, and realize there is
a lot of red in the gate. Get second cup of coffee. Oh, some library
must have released a bad version. Man, what released recently?
This script attempts to give that answer by programmatically providing
a list of everything in global-requirements that released recently, in
descending time order.
This does *not* handle the 2nd order dependency problem (in order to
do that we'd have to install the world as well, this is purely a
metadata lookup tool). If we have regularly problematic 2nd order
dependencies add them to the list at the end in the code to be
checked.
"""
import argparse
import datetime
import json
import six.moves.urllib.request as urlreq
import sys
import pkg_resources
class Release(object):
name = ""
version = ""
filename = ""
released = ""
def __init__(self, name, version, filename, released):
self.name = name
self.version = version
self.filename = filename
self.released = released
def __repr__(self):
return "<Released %s %s %s>" % (self.name, self.version, self.released)
def _parse_pypi_released(datestr):
return datetime.datetime.strptime(datestr, "%Y-%m-%dT%H:%M:%S")
def _package_name(line):
return pkg_resources.Requirement.parse(line).project_name
def get_requirements():
reqs = []
with open('global-requirements.txt') as f:
for line in f.readlines():
# skip the comment or empty lines
if not line or line.startswith(('#', '\n')):
continue
# get rid of env markers, they are not relevant for our purposes.
line = line.split(';')[0]
reqs.append(_package_name(line))
return reqs
def get_releases_for_package(name, since):
"""Get the release history from pypi
Use the json API to get the release history from pypi. The
returned json structure includes a 'releases' dictionary which has
keys that are release numbers and the value is an array of
uploaded files.
    While we don't have a 'release time' per se (only the upload time
on each of the files), we'll consider the timestamp on the first
source file found (which will be a .zip or tar.gz typically) to be
'release time'. This is inexact, but should be close enough for
our purposes.
"""
f = urlreq.urlopen("http://pypi.python.org/pypi/%s/json" % name)
jsondata = f.read()
data = json.loads(jsondata)
releases = []
    for relname, rellist in data['releases'].items():
for rel in rellist:
if rel['python_version'] == 'source':
when = _parse_pypi_released(rel['upload_time'])
# for speed, only care about when > since
if when < since:
continue
releases.append(
Release(
name,
relname,
rel['filename'],
when))
break
return releases
def get_releases_since(reqs, since):
all_releases = []
for req in reqs:
all_releases.extend(get_releases_for_package(req, since))
# return these in a sorted order from newest to oldest
sorted_releases = sorted(all_releases,
key=lambda x: x.released,
reverse=True)
return sorted_releases
def parse_args():
parser = argparse.ArgumentParser(
description=(
'List recent releases of items in global requirements '
'to look for possible breakage'))
parser.add_argument('-s', '--since', type=int,
default=14,
help='look back ``since`` days (default 14)')
return parser.parse_args()
def main():
opts = parse_args()
since = datetime.datetime.today() - datetime.timedelta(days=opts.since)
print("Looking for requirements releases since %s" % since)
reqs = get_requirements()
# additional sensitive requirements
reqs.append('tox')
reqs.append('pycparser')
releases = get_releases_since(reqs, since)
for rel in releases:
print(rel)
if __name__ == '__main__':
sys.exit(main())
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import mock
from sawmill.log import Log
from sawmill.formatter.email import Email as EmailFormatter
from sawmill.handler.email import Email
def test_output():
'''Test outputting to email.'''
with mock.patch('sawmill.handler.email.smtplib.SMTP', spec=True) as SMTP:
host = 'emailhost'
port = 90210
handler = Email(
host=host,
port=port,
formatter=EmailFormatter(
'Test',
'sender@test.com',
'recipient@test.com'
)
)
log = Log(level='info', message='A message')
handler.handle(log)
SMTP.assert_called_with(host, port)
smtp = SMTP.return_value
assert smtp.sendmail.called is True
assert smtp.quit.called is True
def test_login_when_credentials_present():
'''Test login is called when credentials available.'''
with mock.patch('sawmill.handler.email.smtplib.SMTP', spec=True) as SMTP:
handler = Email(
credentials=('me@test.com', 'mypassword'),
formatter=EmailFormatter(
'Test',
'sender@test.com',
'recipient@test.com'
)
)
log = Log(level='info', message='A message')
handler.handle(log)
smtp = SMTP.return_value
smtp.login.assert_called_with('me@test.com', 'mypassword')
def test_no_login_when_credentials_not_present():
'''Test login is not called when credentials not available.'''
with mock.patch('sawmill.handler.email.smtplib.SMTP', spec=True) as SMTP:
handler = Email(
formatter=EmailFormatter(
'Test',
'sender@test.com',
'recipient@test.com'
)
)
log = Log(level='info', message='A message')
handler.handle(log)
smtp = SMTP.return_value
assert smtp.login.called is False
|
import time
import sys
import multiprocessing
from socket import gethostname
import pickle
import os
import picamera
import numpy as np
import cv2
import markerDetection as md
class FrameBuffer(object):
def __init__(self, resolution):
self.buffer = []
self.resolution = resolution
def __iter__(self):
self.iterationCount = -1
return self
def __next__(self):
self.iterationCount += 1
if self.iterationCount < self.frameCount:
return self.iterationCount
raise StopIteration
def write(self, sensorOutput):
self.buffer.append(sensorOutput)
def flush(self):
        # Establish Recording Length
self.frameCount = len(self.buffer)
print("Finished Recording : {}".format(self.frameCount))
# Create Shared Memory Pool
memoryStart = time.time()
self.manager = multiprocessing.Manager()
i, o = 0, 23
self.pool = self.manager.list(self.buffer[i:o])
del self.buffer[i:o]
while len(self.buffer) > (o + 1):
self.pool.extend(self.buffer[i:o])
del self.buffer[i:o]
self.pool.extend(self.buffer)
del self.buffer
memoryTime = time.time() - memoryStart
print("Finished Creating Memory Pool : {}".format(memoryTime))
def close(self):
self.manager.shutdown()
def raw_resolution(splitter=False):
"""
Round a (width, height) tuple up to the nearest multiple of 32 horizontally
and 16 vertically (as this is what the Pi's camera module does for
unencoded output).
Originally Written by Dave Jones as part of PiCamera
"""
width, height = RESOLUTION
if splitter:
fwidth = (width + 15) & ~15
else:
fwidth = (width + 31) & ~31
fheight = (height + 15) & ~15
return fwidth, fheight
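# Example: RESOLUTION = (1920, 1080) yields fwidth = 1920 (already a multiple
# of 32) and fheight = 1088 (1080 rounded up to the next multiple of 16).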
def bytes2yuv(data):
"""
Converts a bytes object containing YUV data to a `numpy`_ array.
Originally Written by Dave Jones as part of PiCamera
"""
width, height = RESOLUTION
fwidth, fheight = raw_resolution()
y_len = fwidth * fheight
uv_len = (fwidth // 2) * (fheight // 2)
if len(data) != (y_len + 2 * uv_len):
        raise picamera.PiCameraValueError(
            'Incorrect buffer length for resolution %dx%d' % (width, height))
# Separate out the Y, U, and V values from the array
a = np.frombuffer(data, dtype=np.uint8)
Y = a[:y_len].reshape((fheight, fwidth))
Uq = a[y_len:-uv_len].reshape((fheight // 2, fwidth // 2))
Vq = a[-uv_len:].reshape((fheight // 2, fwidth // 2))
# Reshape the values into two dimensions, and double the size of the
# U and V values (which only have quarter resolution in YUV4:2:0)
U = np.empty_like(Y)
V = np.empty_like(Y)
U[0::2, 0::2] = Uq
U[0::2, 1::2] = Uq
U[1::2, 0::2] = Uq
U[1::2, 1::2] = Uq
V[0::2, 0::2] = Vq
V[0::2, 1::2] = Vq
V[1::2, 0::2] = Vq
V[1::2, 1::2] = Vq
# Stack the channels together and crop to the actual resolution
return np.dstack((Y, U, V))[:height, :width]
def yuv2rgb(yuv):
"""
Originally Written by Dave Jones as part of PiCamera
"""
# Apply the standard biases
YUV = yuv.astype(float)
YUV[:, :, 0] = YUV[:, :, 0] - 16 # Offset Y by 16
YUV[:, :, 1:] = YUV[:, :, 1:] - 128 # Offset UV by 128
# YUV conversion matrix from ITU-R BT.601 version (SDTV)
# Y U V
M = np.array([[1.164, 0.000, 1.596], # R
[1.164, -0.392, -0.813], # G
[1.164, 2.017, 0.000]]) # B
# Calculate the dot product with the matrix to produce RGB output,
# clamp the results to byte range and convert to bytes
rgb = YUV.dot(M.T).clip(0, 255).astype(np.uint8)
return rgb
def buffer2bgr(frame):
"""Reads frame from the buffer and returns it as an openCV BGR Image."""
yuvImage = bytes2yuv(frame)
rgbImage = yuv2rgb(yuvImage)
image = cv2.cvtColor(rgbImage, cv2.COLOR_RGB2BGR)
return image
def findMarker(bufferIndex):
"""Multiprocessing Core for Marker Identification"""
image = buffer2bgr(f.pool[bufferIndex])
return md.markerID(image)
if __name__ == "__main__":
print("Session ID")
sessionID = input()
print("Resolution")
RESOLUTION = (int(input()), int(input()))
print("Frame Rate")
FRAMERATE = int(input())
print("Max Recording")
MAX_RECORDING = int(input())
print("ISO")
ISO = int(input())
print("Shutter Speed")
SHUTTER_SPEED = int(input())
print("AWB Mode")
AWB_MODE = input()
print("AWB Gains")
AWB_GAINS = (float(input()), float(input()))
# Camera Setup
camera = picamera.PiCamera()
camera.resolution = RESOLUTION
camera.framerate = FRAMERATE
camera.iso = ISO
camera.shutter_speed = SHUTTER_SPEED
camera.awb_mode = AWB_MODE
camera.awb_gains = AWB_GAINS
f = FrameBuffer(RESOLUTION)
    # Establish Synced Record Start
print("Record Delay")
recordDelay = float(input())
time.sleep(recordDelay)
# Capture Still
camera.capture(f, 'yuv')
camera.close()
# Time Tracking
multiStart = time.time()
# Multiprocessing
pool = multiprocessing.Pool()
mocap = pool.map(findMarker, f, chunksize=round(f.frameCount / 4))
pool.close()
f.close()
# Report Time Tracking
multiTime = time.time() - multiStart
print("Multi Core Processing Finished : {}".format(multiTime))
# Export Data
host = gethostname()
filename = "{}_{}.mocap".format(host, sessionID)
with open(filename, "wb") as pik:
pickle.dump(mocap, pik)
print("Data Exported to : {}".format(os.path.join(os.getcwd(), filename)))
|
# Add (two numbers)
# 2021 Yong-Jun Shin
# Ctrl-Shift-P --> Terminal: Create New Integrated Terminal
# gremlinpython==3.5.0 --> requirements.txt
# gremlinpython supports python 3.4 or higher
# pip install --target ".\.venv\Lib\site-packages" -r requirements.txt --upgrade
import logging
import azure.functions as func
import json
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
    try:
        req_data = req.get_json()
        if req_data is not None:
            input_value = req_data.get('value')  # input value from the request body
            output = input_value + 10
            response = {"output": output}
            response_json = json.dumps(response)
            logging.info(response_json)
            return func.HttpResponse(response_json, mimetype="application/json")
        else:
            return func.HttpResponse("req_data is None!", status_code=400)
    except Exception as e:
        logging.info('There was an exception with Add: {0}'.format(e))
        return func.HttpResponse('There was an exception with Add: {0}'.format(e),
                                 status_code=500)
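# A hypothetical request/response pair for this function:
#   POST body {"value": 5}  ->  response body {"output": 15}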
|
import os
import re
from setuptools import setup
def get_version(verbose=0):
""" Extract version information from source code """
try:
if os.path.exists('spirack/version.py'):
with open('spirack/version.py') as f:
ln = f.readline()
v = ln.split('=')
m = re.search('\'(.*)\'', ln)
FULLVERSION = (m.group(0)).strip().strip('\'').strip('"')
else:
FULLVERSION = '0.0'
except Exception as E:
FULLVERSION = '0.0'
if verbose:
print('get_version_info: %s' % FULLVERSION)
return FULLVERSION
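# Example: if spirack/version.py contains the line
#   __version__ = '0.2.0'
# then get_version() returns '0.2.0' (the version number is illustrative).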
with open("README.md", "r") as fh:
long_description = fh.read()
version = get_version()
setup(name='spirack',
version=version,
description='Drivers for the QuTech SPI-rack',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/mtiggelman/SPI-rack',
author='Marijn Tiggelman',
author_email='qutechdev@gmail.com',
license='MIT',
packages=['spirack'],
keywords=['SPI', 'Qcodes', 'SPI-rack', 'QuTech', 'TU Delft', 'SPI'],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"),
install_requires=[
'pyserial',
'numpy'
],
package_data={
'': ['*.cat', '*.inf']
})
|
import numpy as np
from smac.epm.base_epm import AbstractEPM
from smac.epm.gaussian_process.gradient_gpr import GaussianProcessRegressor
from smac.epm.gaussian_process.kernels import KernelBase, Cubic, Gaussian
__author__ = "jajajag"
__copyright__ = "Copyright 2018"
__license__ = "3-clause BSD"
__maintainer__ = "jajajag"
__email__ = "zzzsgsb@gmail.com"
__version__ = "0.0.1"
class GaussianGradientEPM(AbstractEPM):
"""Interface to the Gaussian EPM with gradient.
Attributes
----------
kernel1 : smac.utils.kernels.KernelBase
kernel2 : smac.utils.kernels.KernelBase
"""
def __init__(self,
kernel1: KernelBase = Gaussian,
kernel2: KernelBase = Cubic,
**kwargs):
"""Constructor
Parameters
----------
kernel1 : smac.utils.kernels.KernelBase
The first kernel in the Gaussian Process Regressor.
kernel2 : smac.utils.kernels.KernelBase
The second kernel in the Gaussian Process Regressor.
"""
super().__init__(**kwargs)
        # Create the Gaussian process regressor
self.gpr = GaussianProcessRegressor(kernel1=kernel1, kernel2=kernel2)
def _train(self, X: np.ndarray, y: np.ndarray, **kwargs):
"""Trains the random forest on X and y.
Parameters
----------
X : np.ndarray [n_samples, n_features (config + instance features)]
Input data points.
Y : np.ndarray [n_samples, ]
The corresponding target values.
Returns
-------
self
"""
        # Raise an error if no gradient information was given
if 'gradient' not in kwargs:
raise ValueError("Parameter gradient is not given.")
        # Train the Gaussian process regressor
self.gpr.fit(X, y.flatten(), kwargs['gradient'])
"""
        # Print the current GP model predictions (debug)
import numpy as np
X = np.array(range(1000)) / 1000 + 0.001
y = self.gpr.predict(X.reshape(-1, 1))
print(y)
"""
return self
def _predict(self, X: np.ndarray):
"""Predict means and variances for given X.
Parameters
----------
X : np.ndarray of shape = [n_samples,
n_features (config + instance features)]
Returns
-------
means : np.ndarray of shape = [n_samples, 1]
Predictive mean
vars : np.ndarray of shape = [n_samples, 1]
Predictive variance
"""
        # Predict the mean and the standard deviation
mean, std = self.gpr.predict(X, return_std=True)
        # Return the mean curve and the variance
return mean.reshape((-1, 1)), np.square(std).reshape((-1, 1))
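# Usage sketch (hypothetical arrays; a gradient array must accompany training):
#   epm._train(X, y, gradient=dy_dx)
#   mean, var = epm._predict(X_new)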
|
try:
from zcrmsdk.src.com.zoho.crm.api.exception import SDKException
from zcrmsdk.src.com.zoho.crm.api.util import Constants
except Exception:
from ..exception import SDKException
from ..util import Constants
class Note(object):
def __init__(self):
"""Creates an instance of Note"""
self.__owner = None
self.__modified_time = None
self.__attachments = None
self.__created_time = None
self.__parent_id = None
self.__editable = None
self.__sharing_permission = None
self.__se_module = None
self.__is_shared_to_client = None
self.__modified_by = None
self.__size = None
self.__state = None
self.__voice_note = None
self.__id = None
self.__created_by = None
self.__note_title = None
self.__note_content = None
self.__key_modified = dict()
def get_owner(self):
"""
The method to get the owner
Returns:
User: An instance of User
"""
return self.__owner
def set_owner(self, owner):
"""
The method to set the value to owner
Parameters:
owner (User) : An instance of User
"""
try:
from zcrmsdk.src.com.zoho.crm.api.users import User
except Exception:
from ..users import User
if owner is not None and not isinstance(owner, User):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: owner EXPECTED TYPE: User', None, None)
self.__owner = owner
self.__key_modified['Owner'] = 1
def get_modified_time(self):
"""
The method to get the modified_time
Returns:
datetime: An instance of datetime
"""
return self.__modified_time
def set_modified_time(self, modified_time):
"""
The method to set the value to modified_time
Parameters:
modified_time (datetime) : An instance of datetime
"""
from datetime import datetime
if modified_time is not None and not isinstance(modified_time, datetime):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modified_time EXPECTED TYPE: datetime', None, None)
self.__modified_time = modified_time
self.__key_modified['Modified_Time'] = 1
def get_attachments(self):
"""
The method to get the attachments
Returns:
list: An instance of list
"""
return self.__attachments
def set_attachments(self, attachments):
"""
The method to set the value to attachments
Parameters:
attachments (list) : An instance of list
"""
if attachments is not None and not isinstance(attachments, list):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: attachments EXPECTED TYPE: list', None, None)
self.__attachments = attachments
self.__key_modified['$attachments'] = 1
def get_created_time(self):
"""
The method to get the created_time
Returns:
datetime: An instance of datetime
"""
return self.__created_time
def set_created_time(self, created_time):
"""
The method to set the value to created_time
Parameters:
created_time (datetime) : An instance of datetime
"""
from datetime import datetime
if created_time is not None and not isinstance(created_time, datetime):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: created_time EXPECTED TYPE: datetime', None, None)
self.__created_time = created_time
self.__key_modified['Created_Time'] = 1
def get_parent_id(self):
"""
The method to get the parent_id
Returns:
Record: An instance of Record
"""
return self.__parent_id
def set_parent_id(self, parent_id):
"""
The method to set the value to parent_id
Parameters:
parent_id (Record) : An instance of Record
"""
try:
from zcrmsdk.src.com.zoho.crm.api.record import Record
except Exception:
from ..record import Record
if parent_id is not None and not isinstance(parent_id, Record):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: parent_id EXPECTED TYPE: Record', None, None)
self.__parent_id = parent_id
self.__key_modified['Parent_Id'] = 1
def get_editable(self):
"""
The method to get the editable
Returns:
bool: A bool representing the editable
"""
return self.__editable
def set_editable(self, editable):
"""
The method to set the value to editable
Parameters:
editable (bool) : A bool representing the editable
"""
if editable is not None and not isinstance(editable, bool):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: editable EXPECTED TYPE: bool', None, None)
self.__editable = editable
self.__key_modified['$editable'] = 1
def get_sharing_permission(self):
"""
The method to get the sharing_permission
Returns:
string: A string representing the sharing_permission
"""
return self.__sharing_permission
def set_sharing_permission(self, sharing_permission):
"""
The method to set the value to sharing_permission
Parameters:
sharing_permission (string) : A string representing the sharing_permission
"""
if sharing_permission is not None and not isinstance(sharing_permission, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sharing_permission EXPECTED TYPE: str', None, None)
self.__sharing_permission = sharing_permission
self.__key_modified['$sharing_permission'] = 1
def get_se_module(self):
"""
The method to get the se_module
Returns:
string: A string representing the se_module
"""
return self.__se_module
def set_se_module(self, se_module):
"""
The method to set the value to se_module
Parameters:
se_module (string) : A string representing the se_module
"""
if se_module is not None and not isinstance(se_module, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: se_module EXPECTED TYPE: str', None, None)
self.__se_module = se_module
self.__key_modified['$se_module'] = 1
def get_is_shared_to_client(self):
"""
The method to get the is_shared_to_client
Returns:
bool: A bool representing the is_shared_to_client
"""
return self.__is_shared_to_client
def set_is_shared_to_client(self, is_shared_to_client):
"""
The method to set the value to is_shared_to_client
Parameters:
is_shared_to_client (bool) : A bool representing the is_shared_to_client
"""
if is_shared_to_client is not None and not isinstance(is_shared_to_client, bool):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: is_shared_to_client EXPECTED TYPE: bool', None, None)
self.__is_shared_to_client = is_shared_to_client
self.__key_modified['$is_shared_to_client'] = 1
def get_modified_by(self):
"""
The method to get the modified_by
Returns:
User: An instance of User
"""
return self.__modified_by
def set_modified_by(self, modified_by):
"""
The method to set the value to modified_by
Parameters:
modified_by (User) : An instance of User
"""
try:
from zcrmsdk.src.com.zoho.crm.api.users import User
except Exception:
from ..users import User
if modified_by is not None and not isinstance(modified_by, User):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modified_by EXPECTED TYPE: User', None, None)
self.__modified_by = modified_by
self.__key_modified['Modified_By'] = 1
def get_size(self):
"""
The method to get the size
Returns:
string: A string representing the size
"""
return self.__size
def set_size(self, size):
"""
The method to set the value to size
Parameters:
size (string) : A string representing the size
"""
if size is not None and not isinstance(size, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: size EXPECTED TYPE: str', None, None)
self.__size = size
self.__key_modified['$size'] = 1
def get_state(self):
"""
The method to get the state
Returns:
string: A string representing the state
"""
return self.__state
def set_state(self, state):
"""
The method to set the value to state
Parameters:
state (string) : A string representing the state
"""
if state is not None and not isinstance(state, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: state EXPECTED TYPE: str', None, None)
self.__state = state
self.__key_modified['$state'] = 1
def get_voice_note(self):
"""
The method to get the voice_note
Returns:
bool: A bool representing the voice_note
"""
return self.__voice_note
def set_voice_note(self, voice_note):
"""
The method to set the value to voice_note
Parameters:
voice_note (bool) : A bool representing the voice_note
"""
if voice_note is not None and not isinstance(voice_note, bool):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: voice_note EXPECTED TYPE: bool', None, None)
self.__voice_note = voice_note
self.__key_modified['$voice_note'] = 1
def get_id(self):
"""
The method to get the id
Returns:
int: An int representing the id
"""
return self.__id
def set_id(self, id):
"""
The method to set the value to id
Parameters:
id (int) : An int representing the id
"""
if id is not None and not isinstance(id, int):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: id EXPECTED TYPE: int', None, None)
self.__id = id
self.__key_modified['id'] = 1
def get_created_by(self):
"""
The method to get the created_by
Returns:
User: An instance of User
"""
return self.__created_by
def set_created_by(self, created_by):
"""
The method to set the value to created_by
Parameters:
created_by (User) : An instance of User
"""
try:
from zcrmsdk.src.com.zoho.crm.api.users import User
except Exception:
from ..users import User
if created_by is not None and not isinstance(created_by, User):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: created_by EXPECTED TYPE: User', None, None)
self.__created_by = created_by
self.__key_modified['Created_By'] = 1
def get_note_title(self):
"""
The method to get the note_title
Returns:
string: A string representing the note_title
"""
return self.__note_title
def set_note_title(self, note_title):
"""
The method to set the value to note_title
Parameters:
note_title (string) : A string representing the note_title
"""
if note_title is not None and not isinstance(note_title, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: note_title EXPECTED TYPE: str', None, None)
self.__note_title = note_title
self.__key_modified['Note_Title'] = 1
def get_note_content(self):
"""
The method to get the note_content
Returns:
string: A string representing the note_content
"""
return self.__note_content
def set_note_content(self, note_content):
"""
The method to set the value to note_content
Parameters:
note_content (string) : A string representing the note_content
"""
if note_content is not None and not isinstance(note_content, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: note_content EXPECTED TYPE: str', None, None)
self.__note_content = note_content
self.__key_modified['Note_Content'] = 1
def is_key_modified(self, key):
"""
The method to check if the user has modified the given key
Parameters:
key (string) : A string representing the key
Returns:
int: An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if key in self.__key_modified:
return self.__key_modified.get(key)
return None
def set_key_modified(self, key, modification):
"""
The method to mark the given key as modified
Parameters:
key (string) : A string representing the key
modification (int) : An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if modification is not None and not isinstance(modification, int):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None)
self.__key_modified[key] = modification
|
#
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: May-28-2017
#
from util.constants import *
from collections import deque
from circuit import Circuit
import numpy as np
class AppState(object):
"""
Describes the valid states in this app.
"""
Editing = 'editing'
Simulating = 'simulating'
Paused = 'paused'
class Model(object):
"""
An instance that keeps data on the simulation, including circuit info,
wave generation, animations, and so on.
circuit: model representing the current state of electrical
components in the system.
waveSpeed: speed of wave, in m/s.
simSpeed: simulation speed, a multiplier for elapsed time.
appState: the application state.
elapsed: amount of elapsed time, in seconds.
maxAmplitude: the maximum amplitude reached within the current
simulation.
"""
def __init__(self):
"""
Initializes a brand new model for a fresh app start.
"""
self.graph = [np.array([]), np.array([])]
self.circuit = Circuit()
self.simSpeed = 1.0 / NS_IN_S
self.elapsed = 0
self._lastStep = 0
self.maxAmplitude = 10
self.appState = AppState.Editing
def simulate(self, dt):
"""
Simulate the system by step dt, in seconds.
"""
last = self.elapsed
self.elapsed += dt * self.simSpeed
# Determine how many steps must be made.
segs = int(STEPS_PER_NS * (self.elapsed - self._lastStep) * NS_IN_S)
for s in range(segs):
self._lastStep += 1.0 / STEPS_PER_NS / NS_IN_S
self._step()
# Recompute overall
self.graph = [np.array([]), np.array([])]
e = self.circuit.head.next
while e.next != None:
self.graph[0] = np.concatenate((self.graph[0], e.xs))
v = e.forward + e.backward
if len(v) > 0:
self.maxAmplitude = max(self.maxAmplitude, v.max(), v.min())
self.graph[1] = np.concatenate((self.graph[1], v))
e = e.next
# Update every oscilloscope
h = self.circuit.headOscilloscope
i = 1
while h != None:
while i < len(self.graph[0]) and self.graph[0][i] < h.position:
i += 1
h.record(self._lastStep, self.graph[1][i - 1])
h = h.next
def reset(self):
"""
Resets the simulation, but not the circuit.
"""
self.elapsed = 0
self._lastStep = 0
self.graph = [np.array([]), np.array([])]
self.circuit.reset()
self.maxAmplitude = 10
def _step(self):
"""
Simulates a discrete step for each part of the circuit.
"""
# We go through each discretized value in forward and backward
# currents, deciding whether it should move or not, and how it
# should move.
        e = self.circuit.head
        while e is not None:
            e.split()
            e = e.next
        # Now shift
        e = self.circuit.head
        while e.next is not None:
            e.rotateBackward()
            e = e.next
        while e is not None:
            e.rotateForward()
            e = e.prev
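# A minimal usage sketch (an illustration, not part of the original app):
# advance a fresh Model a few frames and inspect the sampled waveform.
# This assumes the default Circuit() builds a valid line on its own.
if __name__ == '__main__':
    model = Model()
    for _ in range(10):
        model.simulate(1.0 / 60)  # one 60 fps frame of wall-clock time
    xs, vs = model.graph
    print(len(xs), 'samples; max amplitude =', model.maxAmplitude)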
|
"""empty message
Revision ID: 9d8fc446eec1
Revises: cb34c1d864fc
Create Date: 2019-09-18 00:45:49.930785
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "9d8fc446eec1"
down_revision = "cb34c1d864fc"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("shops_to_price", sa.Column("kind_id", postgresql.UUID(as_uuid=True), nullable=True))
op.create_index(op.f("ix_shops_to_price_kind_id"), "shops_to_price", ["kind_id"], unique=False)
op.create_foreign_key(None, "shops_to_price", "kinds", ["kind_id"], ["id"])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
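    # NOTE: create_foreign_key above was called with name=None, so the database
    # generated the constraint name; drop_constraint needs that real name here,
    # and this downgrade will fail until it is filled in.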
op.drop_constraint(None, "shops_to_price", type_="foreignkey")
op.drop_index(op.f("ix_shops_to_price_kind_id"), table_name="shops_to_price")
op.drop_column("shops_to_price", "kind_id")
# ### end Alembic commands ###
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
ISO plate mapper
"""
from sqlalchemy.orm import relationship
from everest.repositories.rdb.utils import mapper
from thelma.entities.iso import ISO_PLATE_TYPES
from thelma.entities.iso import IsoPlate
from thelma.entities.rack import Rack
__docformat__ = 'reStructuredText en'
__all__ = ['create_mapper']
def create_mapper(iso_plate_tbl):
"Mapper factory."
m = mapper(IsoPlate, iso_plate_tbl,
id_attribute='iso_plate_id',
polymorphic_on=iso_plate_tbl.c.iso_plate_type,
polymorphic_identity=ISO_PLATE_TYPES.ISO_PLATE,
properties=dict(
rack=relationship(Rack, uselist=False),
)
)
return m
|
from flask import Blueprint, render_template
bp = Blueprint('klimatologi', __name__, url_prefix='/klimatologi')
@bp.route('/')
def index():
return render_template('klimatologi/index.html')
|
"""Feedback URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from django.shortcuts import render
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from User.views import signup, user_login, user_logout
from Student.views import student_view
from Professor.views import professor_view
from . import settings
from Course.views import AddCourse, SearchCourse, joinCourse, courseHome
def home_view(request):
return render(request, "index.html", {})
urlpatterns = [
path('', user_login, name='home'),
path('admin/', admin.site.urls),
path('search/', SearchCourse, name='search_results'),
path('courseadd/<int:id>/<int:group>', joinCourse, name='join_course'),
path('signup/', signup, name='signup'),
path('student/<int:id>', student_view, name='student'),
path('professor/<int:id>', professor_view, name='professor'),
path('CourseForm/', AddCourse, name='addcourseform'),
path('Course/<int:cid>/<int:gid>', courseHome,name="courseHome"),
url(r'^logout/$', user_logout, name='logout')
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
class Solution:
def findMedianSortedArrays(self, A, B):
total_len = len(A) + len(B)
len_half = total_len // 2
if total_len % 2 == 1:
return self.kth(A, B, len_half)
else:
return (self.kth(A, B, len_half) + self.kth(A, B, len_half - 1)) / 2.
def kth(self, a, b, k):
if not a:
return b[k]
if not b:
return a[k]
a_half, b_half = len(a) // 2, len(b) // 2
a_med, b_med = a[a_half], b[b_half]
# when k is bigger than the sum of a and b's median indices
if a_half + b_half < k:
# if a's median is bigger than b's, b's first half doesn't include k
if a_med > b_med:
return self.kth(a, b[b_half + 1:], k - b_half - 1)
else:
return self.kth(a[a_half + 1:], b, k - a_half - 1)
        # when k is at most the sum of a and b's median indices
else:
# if a's median is bigger than b's, a's second half doesn't include k
if a_med > b_med:
return self.kth(a[:a_half], b, k)
else:
return self.kth(a, b[:b_half], k)
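# Quick sanity checks (illustrative): the merged arrays are
# [1, 2, 3] (median 2) and [1, 2, 3, 4] (median 2.5).
if __name__ == '__main__':
    s = Solution()
    assert s.findMedianSortedArrays([1, 3], [2]) == 2
    assert s.findMedianSortedArrays([1, 2], [3, 4]) == 2.5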
|
"""
.. _ref_custom_visualization:
Custom Scalar Visualization
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Display custom scalars using an existing mesh.
"""
import pyvista
import numpy as np
from ansys.mapdl import reader as pymapdl_reader
from ansys.mapdl.reader import examples
# Download an example shaft modal analysis result file
shaft = examples.download_shaft_modal()
###############################################################################
# Each result file contains both a ``mesh`` property and a ``grid``
# property. The ``mesh`` property can be thought of as the MAPDL
# representation of the FEM, while the ``grid`` property can be thought
# of as the Python visualization object used to plot within Python.
print('shaft.mesh:\n', shaft.mesh)
print('-'*79)
print('shaft.grid:\n', shaft.grid)
###############################################################################
# Plotting
# ~~~~~~~~
#
# The grid instance is a `pyvista.UnstructuredGrid`, part of the
# `pyvista` library. This class allows for advanced plotting using
# VTK in just a few lines of code. For example, you can plot the
# underlying mesh with:
shaft.grid.plot(color='w', smooth_shading=True)
###############################################################################
# Plotting Node Scalars
# ~~~~~~~~~~~~~~~~~~~~~
#
# If you have point-wise or cell-wise scalars (nodes and elements in FEA),
# you can plot these scalars by setting the ``scalars=`` parameter.
# Here, I'm simply using the x location of the nodes to color the
# mesh.
#
# It follows that you can use any set of scalars provided that it
# matches the number of nodes in the unstructured grid or the number
# of cells in the unstructured grid. Here, we're plotting node values.
x_scalars = shaft.grid.points[:, 0]
shaft.grid.plot(scalars=x_scalars, smooth_shading=True)
###############################################################################
# Plotting With Missing Values
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you do not have values for every node (for example, the midside
# nodes), you can leave these values as NaN and the plotter will take
# care of plotting only the real values.
#
# For example, if you have calculated strain scalars that are only
# available at certain nodes, you can still plot those. This example
# just nulls out the first 2000 nodes to be able to visualize the
# missing values. If you are only missing midside values, your plot
# will not show the missing values since `pyvista` only plots the edge
# nodes.
pontoon = examples.download_pontoon()
nnum, strain = pontoon.nodal_elastic_strain(0)
scalars = strain[:, 0]
scalars[:2000] = np.nan # here, we simulate unknown values
pontoon.grid.plot(scalars=scalars, show_edges=True, lighting=False)
|
from swimport.pools.pools import pools, syspools
from swimport.pools.types import TypeSwimportingPool
from swimport.typeswim import TypeSwimporting, FunctionBody
@syspools.add(TypeSwimportingPool)
def slice_no_step(start_type, end_type, cpp_name, *, swim, **type_args):
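    """Build a TypeSwimporting for a slice type with a start and an end but no step."""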
start_type, start_porting = swim.get_porting(start_type)
end_type, end_porting = swim.get_porting(end_type)
cpp_name = cpp_name.replace('?', f'{start_type}, {end_type}')
if start_porting.to_py_func and end_porting.to_py_func:
body = f"""
ok = 1;
PyObject* start = input.has_start ?
{start_porting.to_py_func.function_name}(input.start, ok, PACKED_UD(0)) :
nullptr;
if (!ok || PyErr_Occurred() != nullptr)
return nullptr;
        PyObject* end = input.has_end ?
{end_porting.to_py_func.function_name}(input.end, ok, PACKED_UD(1)) :
nullptr;
if (!ok || PyErr_Occurred() != nullptr){{
            Py_XDECREF(start);
return nullptr;
}}
PyObject* ret = PySlice_New(start, end, nullptr);
Py_XDECREF(start);
Py_XDECREF(end);
return ret;
"""
to_py = FunctionBody.combine(start_porting.to_py_func, end_porting.to_py_func)(body)
else:
to_py = None
if start_porting.to_cpp_func and end_porting.to_cpp_func:
body = f"""
ok = 1;
if (!PySlice_Check(input))
SWIM_RAISE_TYPE("expected a slice, not ", input);
PyObject* step = PyObject_GetAttrString(input, "step");
if (step != Py_None)
{{
Py_DECREF(step);
SWIM_RAISE(PyExc_ValueError, "slice cannot have a specified step");
}}
Py_DECREF(step);
{cpp_name} ret;
PyObject* start = PyObject_GetAttrString(input, "start");
if (start == Py_None)
ret.has_start = false;
else{{
ret.start = {start_porting.to_cpp_func.function_name}(start, ok, PACKED_UD(0));
if (!ok || PyErr_Occurred() != nullptr) {{
Py_DECREF(start);
return ret;
}}
ret.has_start = true;
}}
Py_DECREF(start);
PyObject* end = PyObject_GetAttrString(input, "stop");
if (end == Py_None)
ret.has_end = false;
else{{
ret.end = {end_porting.to_cpp_func.function_name}(end, ok, PACKED_UD(1));
if (!ok || PyErr_Occurred() != nullptr) {{
Py_DECREF(end);
return ret;
}}
ret.has_end = true;
}}
Py_DECREF(end);
return ret;
"""
to_cpp = FunctionBody.combine(start_porting.to_cpp_func, end_porting.to_cpp_func)(body)
else:
to_cpp = None
if start_porting.to_cpp_check_func and end_porting.to_cpp_check_func:
body = f"""
if (!PySlice_Check(input))
return 0;
PyObject* step = PyObject_GetAttrString(input, "step");
if (step != Py_None)
{{
Py_DECREF(step);
return 0;
}}
Py_DECREF(step);
PyObject* start = PyObject_GetAttrString(input, "start");
if (start != Py_None)
{{
if (!{start_porting.to_cpp_check_func.function_name}(start, PACKED_UD(0))){{
Py_DECREF(start);
return 0;
}}
}}
Py_DECREF(start);
PyObject* end = PyObject_GetAttrString(input, "stop");
if (end != Py_None){{
if (!{end_porting.to_cpp_check_func.function_name}(end, PACKED_UD(1))){{
Py_DECREF(end);
return 0;
}}
}}
Py_DECREF(end);
return 1;
"""
to_cpp_check = FunctionBody.combine(start_porting.to_cpp_check_func, end_porting.to_cpp_check_func)(body)
else:
to_cpp_check = None
if start_porting.to_cpp_post_func or end_porting.to_cpp_post_func:
body = f"""
ok = 1;
"""
if start_porting.to_cpp_post_func:
body += f"""
if (input.has_start){{
            {start_porting.to_cpp_post_func.function_name}(input.start, ok, PACKED_UD(0));
}}
"""
if end_porting.to_cpp_post_func:
body += f"""
if (input.has_end){{
            {end_porting.to_cpp_post_func.function_name}(input.end, ok, PACKED_UD(1));
}}
"""
to_cpp_post = FunctionBody.combine(start_porting.to_cpp_post_func, end_porting.to_cpp_post_func)(body)
else:
to_cpp_post = None
return TypeSwimporting(cpp_name, 'slice',
to_py=to_py,
to_cpp=to_cpp,
to_cpp_check=to_cpp_check,
to_cpp_post=to_cpp_post,
**type_args)
@syspools.add(TypeSwimportingPool)
def slice_step(start_type, end_type, step_type, cpp_name, *, swim, **type_args):
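    """Build a TypeSwimporting for a slice type with a start, an end, and a step."""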
start_type, start_porting = swim.get_porting(start_type)
end_type, end_porting = swim.get_porting(end_type)
step_type, step_porting = swim.get_porting(step_type)
cpp_name = cpp_name.replace('?', f'{start_type}, {end_type}, {step_type}')
if start_porting.to_py_func and end_porting.to_py_func and step_porting.to_py_func:
body = f"""
ok = 1;
PyObject* start = input.has_start ?
{start_porting.to_py_func.function_name}(input.start, ok, PACKED_UD(0)) :
nullptr;
if (!ok || PyErr_Occurred() != nullptr)
return nullptr;
        PyObject* end = input.has_end ?
{end_porting.to_py_func.function_name}(input.end, ok, PACKED_UD(1)) :
nullptr;
if (!ok || PyErr_Occurred() != nullptr){{
            Py_XDECREF(start);
return nullptr;
}}
PyObject* step = input.has_step ?
{step_porting.to_py_func.function_name}(input.step, ok, PACKED_UD(2)) :
nullptr;
if (!ok || PyErr_Occurred() != nullptr){{
            Py_XDECREF(start);
            Py_XDECREF(end);
return nullptr;
}}
PyObject* ret = PySlice_New(start, end, step);
Py_XDECREF(start);
Py_XDECREF(end);
Py_XDECREF(step);
return ret;
"""
to_py = FunctionBody.combine(start_porting.to_py_func, end_porting.to_py_func, step_porting.to_py_func)(body)
else:
to_py = None
if start_porting.to_cpp_func and end_porting.to_cpp_func and step_porting.to_cpp_func:
body = f"""
ok = 1;
if (!PySlice_Check(input))
SWIM_RAISE_TYPE("expected a slice, not ", input);
{cpp_name} ret;
PyObject* start = PyObject_GetAttrString(input, "start");
if (start == Py_None)
ret.has_start = false;
else{{
ret.start = {start_porting.to_cpp_func.function_name}(start, ok, PACKED_UD(0));
if (!ok || PyErr_Occurred() != nullptr) {{
Py_DECREF(start);
return ret;
}}
ret.has_start = true;
}}
Py_DECREF(start);
PyObject* end = PyObject_GetAttrString(input, "stop");
if (end == Py_None)
ret.has_end = false;
else{{
ret.end = {end_porting.to_cpp_func.function_name}(end, ok, PACKED_UD(1));
if (!ok || PyErr_Occurred() != nullptr) {{
Py_DECREF(end);
return ret;
}}
ret.has_end = true;
}}
Py_DECREF(end);
PyObject* step = PyObject_GetAttrString(input, "step");
if (step == Py_None)
ret.has_step = false;
else{{
ret.step = {step_porting.to_cpp_func.function_name}(step, ok, PACKED_UD(2));
if (!ok || PyErr_Occurred() != nullptr) {{
Py_DECREF(step);
return ret;
}}
ret.has_step = true;
}}
Py_DECREF(step);
return ret;
"""
to_cpp = FunctionBody.combine(start_porting.to_cpp_func, end_porting.to_cpp_func, step_porting.to_cpp_func)(
body)
else:
to_cpp = None
    if start_porting.to_cpp_check_func and end_porting.to_cpp_check_func and step_porting.to_cpp_check_func:
body = f"""
if (!PySlice_Check(input))
return 0;
PyObject* start = PyObject_GetAttrString(input, "start");
if (start != Py_None)
{{
if (!{start_porting.to_cpp_check_func.function_name}(start, PACKED_UD(0))){{
Py_DECREF(start);
return 0;
}}
}}
Py_DECREF(start);
PyObject* end = PyObject_GetAttrString(input, "stop");
if (end != Py_None){{
if (!{end_porting.to_cpp_check_func.function_name}(end, PACKED_UD(1))){{
Py_DECREF(end);
return 0;
}}
}}
Py_DECREF(end);
PyObject* step = PyObject_GetAttrString(input, "step");
if (step != Py_None){{
if (!{step_porting.to_cpp_check_func.function_name}(step, PACKED_UD(2))){{
Py_DECREF(step);
return 0;
}}
}}
Py_DECREF(step);
return 1;
"""
to_cpp_check = FunctionBody.combine(start_porting.to_cpp_check_func, end_porting.to_cpp_check_func,
step_porting.to_cpp_check_func)(body)
else:
to_cpp_check = None
    if start_porting.to_cpp_post_func or end_porting.to_cpp_post_func or step_porting.to_cpp_post_func:
body = f"""
ok = 1;
"""
if start_porting.to_cpp_post_func:
body += f"""
if (input.has_start){{
            {start_porting.to_cpp_post_func.function_name}(input.start, ok, PACKED_UD(0));
}}
"""
if end_porting.to_cpp_post_func:
body += f"""
if (input.has_end){{
            {end_porting.to_cpp_post_func.function_name}(input.end, ok, PACKED_UD(1));
}}
"""
if step_porting.to_cpp_post_func:
body += f"""
if (input.has_step){{
            {step_porting.to_cpp_post_func.function_name}(input.step, ok, PACKED_UD(2));
}}
"""
to_cpp_post = FunctionBody.combine(start_porting.to_cpp_post_func, end_porting.to_cpp_post_func,
step_porting.to_cpp_post_func)(body)
else:
to_cpp_post = None
return TypeSwimporting(cpp_name, 'slice',
to_py=to_py,
to_cpp=to_cpp,
to_cpp_check=to_cpp_check,
to_cpp_post=to_cpp_post,
**type_args)
@pools.add(TypeSwimportingPool, name='slice')
def slice_(start_type, end_type=..., step_type=None, cpp_name='slice<?>', *, swim, **type_args):
"""
typemaps for using slices for a set of types
:param start_type: the name of the imported type for the start of the slice
:param end_type: the name of the imported type for the start of the slice. default is to use start_type
:param end_type: the name of the imported type for the start of the slice. default is to not allow steps.
... to use start_type
:param cpp_name: the name of the cpp type to map for. ? will be replaced with the type names
:param type_args: keyword parameters forwarded to the TypeSwimporting created
"""
if end_type is ...:
end_type = start_type
if step_type is ...:
step_type = start_type
    if step_type:
        return slice_step(start_type, end_type, step_type, cpp_name, swim=swim, **type_args)
    else:
        return slice_no_step(start_type, end_type, cpp_name, swim=swim, **type_args)
|
import fnmatch
import itertools
import pathlib
import subprocess
from obsgit.exporter import Exporter
class StorageLFS:
"""File storage in git LFS"""
def __init__(self, git):
self.git = git
# When using the OBS storage we can avoid some downloads, but
# is not the case for LFS. In this model the index will be
# empty always.
self.index = set()
self.tracked = set()
self._update_tracked()
def _update_tracked(self):
out = subprocess.run(
["git", "lfs", "track"],
cwd=self.git.path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
check=False
)
for line in out.stdout.splitlines():
if line.startswith(" " * 4):
self.tracked.add(line.split()[0])
async def is_installed(self):
out = subprocess.run(
["git", "lfs", "install"],
cwd=self.git.path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False
)
is_installed = out.returncode == 0
# Track the default extensions already, we can later include
# specific files
if is_installed:
for binary in Exporter.BINARY | Exporter.NON_BINARY_EXCEPTIONS:
await self._store(pathlib.Path(f"*{binary}"))
return is_installed
def overlaps(self):
return [
(a, b)
for a, b in itertools.combinations(self.tracked, 2)
if fnmatch.fnmatch(a, b)
]
def transfer(self, md5, project, package, filename, obs):
pass
def _tracked(self, filename):
return any(fnmatch.fnmatch(filename, track) for track in self.tracked)
async def _store(self, filename_path):
        # When registering general patterns, like "*.gz", we do not
# have a path relative to the git repository
try:
filename_path = filename_path.relative_to(self.git.path)
except ValueError:
pass
# TODO: we can edit `.gitattributes` manually
subprocess.run(
["git", "lfs", "track", filename_path],
cwd=self.git.path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False
)
self.tracked.add(str(filename_path))
await self.commit()
async def store_files(self, package, files_md5):
package_path = self.git.prefix / package
for filename, _ in files_md5:
if not self._tracked(filename):
await self._store(package_path / filename)
async def fetch(self):
pass
async def delete(self, filename_path):
subprocess.run(
["git", "lfs", "untrack", filename_path],
cwd=self.git.path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False
)
async def commit(self):
subprocess.run(
["git", "add", ".gitattributes"],
cwd=self.git.path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False
)
|
# Generated by Django 2.2 on 2019-04-26 00:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('anteproyecto', '0028_auto_20190424_2318'),
]
operations = [
migrations.AddField(
model_name='anteproyecto',
name='estado',
field=models.CharField(choices=[('EJ', 'Ejercido'), ('NE', 'Noejercido'), ('ED', 'Endesarrollo'), ('CP', 'Completo')], default='ED', max_length=2),
),
]
|
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
import numpy as np
from gym import spaces
def manhattan_dist(a, b):
return np.abs(a[0] - b[0]) + np.abs(a[1] - b[1])
class StickyFloorEnv(MiniGridEnv):
"""
Empty grid environment, no obstacles, sparse reward
"""
def __init__(
self,
size=8,
agent_start_pos=[(1, 1), (8-2, 8-2)],
agent_start_dirs=[0, 0],
goals = [(8-2, 8-2), (1, 1)],
min_prob=0.01,
reward_type='manhattan_dist',
):
self.agent_start_pos = agent_start_pos
self.agent_start_dirs = agent_start_dirs
self.goals = goals
self._min_prob = min_prob
self._num_grid_configs = len(goals)
self._grid_config_index = 0
self._reward_type = reward_type
super().__init__(
grid_size=size,
max_steps=4*size*size,
# Set this to True for maximum speed
see_through_walls=True
)
self.observation_space = spaces.Dict({
'agent_pos': spaces.Box(
low=0,
high=32,
shape=(2,),
dtype='float32'
),
'direction': spaces.Box(
low=0,
high=4,
shape=(1,),
dtype='float32'
),
'goal_pos': spaces.Box(
low=0,
high=32,
shape=(2,),
dtype='float32'
),
})
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = Grid(width, height)
middle_x = (width - 1) / 2
middle_y = (height - 1) / 2
for x in range(1, width-1):
for y in range(1, height-1):
norm_delta_middle_x = (x - middle_x) / (width - 2 - middle_x)
norm_delta_middle_y = (y - middle_y) / (height - 2 - middle_y)
sticky_prob = self._sticky_prob(norm_delta_middle_x, norm_delta_middle_y)
self.grid.sticky_floor(x, y, sticky_prob)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal square in the bottom-right corner
self._grid_config_index = (self._grid_config_index + 1) % self._num_grid_configs
self.grid.set(*self.goals[self._grid_config_index], Goal())
self.agent_pos = self.agent_start_pos[self._grid_config_index]
self.agent_dir = self.agent_start_dirs[self._grid_config_index]
self.mission = "get to the green goal square"
def _sticky_prob(self, norm_delta_middle_x, norm_delta_middle_y):
return np.clip(np.abs((norm_delta_middle_x - norm_delta_middle_y) / 2), 0, 1 - self._min_prob)
def gen_obs(self):
"""
Generate the agent's view
"""
obs = {
'agent_pos': np.array(self.agent_pos),
'direction': np.array([self.agent_dir]),
'goal_pos': np.array(self.goals[self._grid_config_index]),
# 'mission': self.mission
}
return obs
def step(self, action):
obs, reward, done, info = super().step(action)
if self._reward_type == "manhattan_dist":
goal = self.goals[self._grid_config_index]
reward = -manhattan_dist(self.agent_pos, goal)
return obs, reward, done, {}
class UnStickyFloorEnv8x8(StickyFloorEnv):
def __init__(self, **kwargs):
super().__init__(
size=8,
agent_start_pos=[(1, 1)],#, (8-2, 8-2)],
agent_start_dirs=[0],#, 0],
goals = [(8-2, 8-2)],#, (1, 1)],
**kwargs)
def _sticky_prob(self, norm_delta_middle_x, norm_delta_middle_y):
return 0
class StickyFloorEnv8x8(StickyFloorEnv):
def __init__(self, **kwargs):
super().__init__(size=8,
agent_start_pos=[(1, 1), (8-2, 8-2)],
agent_start_dirs=[0, 0],
goals = [(8-2, 8-2), (1, 1)],
**kwargs)
class StickyFloorEnv16x16(StickyFloorEnv):
def __init__(self, **kwargs):
super().__init__(
size=16,
agent_start_pos=[(1, 1), (16-2, 16-2)],
agent_start_dirs=[0, 0],
goals = [(16-2, 16-2), (1, 1)],
**kwargs
)
class StickyFloorExpGradEnv(StickyFloorEnv):
def _sticky_prob(self, norm_delta_middle_x, norm_delta_middle_y):
norm_dist_from_diagonal = np.clip(np.abs((norm_delta_middle_x - norm_delta_middle_y) / 2), 0, 0.999)
a = 0.5
return np.clip(np.exp(-a / norm_dist_from_diagonal), 0, 1 - self._min_prob)
class StickyFloorExpGradEnv16x16(StickyFloorExpGradEnv):
def __init__(self):
super().__init__(size=16)
register(
id='MiniGrid-StickyFloor-8x8-v0',
entry_point='gym_minigrid.envs:StickyFloorEnv8x8'
)
register(
id='MiniGrid-UnStickyFloor-8x8-v0',
entry_point='gym_minigrid.envs:UnStickyFloorEnv8x8'
)
register(
id='MiniGrid-StickyFloor-16x16-v0',
entry_point='gym_minigrid.envs:StickyFloorEnv16x16'
)
register(
id='MiniGrid-StickyFloorExpGrad-16x16-v0',
entry_point='gym_minigrid.envs:StickyFloorExpGradEnv16x16'
)
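# A minimal smoke test (a sketch, not part of the package): assumes `gym` and
# `gym_minigrid` are installed; the register() calls above make the ids
# resolvable through gym.make.
if __name__ == '__main__':
    import gym
    env = gym.make('MiniGrid-StickyFloor-8x8-v0')
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    print(obs, reward, done)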
|
"""Bancho exceptions"""
# TODO: Prints in exceptions
class loginFailedException(Exception):
pass
class loginBannedException(Exception):
pass
class tokenNotFoundException(Exception):
pass
class channelNoPermissionsException(Exception):
pass
class channelUnknownException(Exception):
pass
class channelModeratedException(Exception):
pass
class noAdminException(Exception):
pass
class commandSyntaxException(Exception):
pass
class banchoConfigErrorException(Exception):
pass
class banchoMaintenanceException(Exception):
pass
class moderatedPMException(Exception):
pass
class userNotFoundException(Exception):
pass
class alreadyConnectedException(Exception):
pass
class stopSpectating(Exception):
pass
class matchWrongPasswordException(Exception):
pass
class matchNotFoundException(Exception):
pass
class matchJoinErrorException(Exception):
pass
class matchCreateError(Exception):
pass
class banchoRestartingException(Exception):
pass
|
def greet_developers(lst: list) -> list:
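    """Add a 'greeting' key to each developer dict, built from its 'firstName' and 'language' values."""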
for i in lst:
i.update({'greeting': f'Hi {i["firstName"]}, what do you like the most about {i["language"]}?'})
return lst
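# Illustrative example (hypothetical data):
if __name__ == '__main__':
    devs = [{'firstName': 'Sofia', 'language': 'Python'}]
    print(greet_developers(devs))
    # [{'firstName': 'Sofia', 'language': 'Python',
    #   'greeting': 'Hi Sofia, what do you like the most about Python?'}]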
|
""" rasterio environment management tools
"""
import threading
import rasterio
from rasterio.session import AWSSession
import rasterio.env
_local = threading.local()
SECRET_KEYS = ('AWS_ACCESS_KEY_ID',
'AWS_SECRET_ACCESS_KEY',
'AWS_SESSION_TOKEN')
def _sanitize(opts, keys):
return {k: (v if k not in keys
else 'xx..xx')
for k, v in opts.items()}
def get_rio_env(sanitize=True):
""" Get GDAL params configured by rasterio for the current thread.
    :param sanitize: If True, replace sensitive values with 'xx..xx'
"""
env = rasterio.env.local._env
if env is None:
return {}
opts = env.get_config_options()
if sanitize:
opts = _sanitize(opts, SECRET_KEYS)
return opts
def activate_rio_env(aws=None, defaults=True, **kwargs):
""" Inject activated rasterio.Env into current thread.
This de-activates previously setup environment.
:param aws: Dictionary of options for rasterio.session.AWSSession
OR False in which case session won't be setup
OR None -- session = rasterio.session.AWSSession()
:param defaults: Supply False to not inject COG defaults
:param **kwargs: Passed on to rasterio.Env(..) constructor
"""
env_old = getattr(_local, 'env', None)
if env_old is not None:
env_old.__exit__(None, None, None)
_local.env = None
if aws is False:
session = None
else:
aws = {} if aws is None else dict(**aws)
region_name = aws.get('region_name', 'auto')
if region_name == 'auto':
from odc.aws import auto_find_region
try:
aws['region_name'] = auto_find_region()
            except Exception:
                # only treat it as an error if a region was explicitly requested by the user
                if 'region_name' in aws:
                    raise
session = AWSSession(**aws)
opts = dict(
GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'
) if defaults else {}
opts.update(**kwargs)
env = rasterio.Env(session=session, **opts)
env.__enter__()
_local.env = env
return get_rio_env()
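# A minimal usage sketch (added as an illustration, not part of the module):
# activate an environment without an AWS session and inspect the GDAL options
# rasterio configured for this thread.
if __name__ == '__main__':
    opts = activate_rio_env(aws=False)
    print(opts)  # e.g. {'GDAL_DISABLE_READDIR_ON_OPEN': 'EMPTY_DIR', ...}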
|
from pydgeot.commands import register
@register(name='commands', help_msg='List available commands', allow_appless=True)
def list_commands(app):
"""
Print available commands information.
:param app: App instance to get commands for.
:type app: pydgeot.app.App | None
"""
from pydgeot import commands
commands = sorted(commands.available.values(), key=lambda x: x.name)
if len(commands) == 0:
return
name_align = max(14, max([len(command.name) for command in commands]))
args_align = max([len(command.help_args) for command in commands])
for command in commands:
if app is None and not command.allow_appless:
continue
print('{} {} {}'.format(command.name.rjust(name_align), command.help_args.ljust(args_align),
command.help_msg))
|
N = int(input())
for i in range(1,10):
print("{N} * {i} = {Ni}".format(N=N,i=i,Ni=N*i))
|
# -*- coding: utf-8 -*-
# Copyright 2022 <Huawei Technologies Co., Ltd>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of advanced use of quantum circuits."""
from mindquantum import RX, RZ, SWAP, UN, Circuit, H, X
from mindquantum.algorithm.library import qft
from mindquantum.core.circuit import (
add_prefix,
apply,
change_param_name,
controlled,
dagger,
shift,
)
# controlled()
u1 = qft(range(2))  # build a quantum circuit
print(u1)
u2 = controlled(u1)(2)  # add control qubit q2 to the circuit, returning a new circuit
print(u2)
u3 = controlled(u1)
u4 = u3(2)
print(u4)
u = controlled(qft)
u = u([2, 3], [0, 1])  # add control qubits in batch
print(u)
# dagger()
u1 = qft(range(3))
print(u1)
u2 = dagger(u1)
print(u2)
u3 = dagger(qft)
u4 = u3(range(3))
print(u4)
# apply()
u1 = qft([0, 1])
circuit1 = apply(u1, [1, 0])  # apply the circuit u1 to qubits q1, q0
print(circuit1, "\n")
u2 = apply(qft, [1, 0])  # apply qft to qubits q1, q0
circuit2 = u2([0, 1])
print(circuit2)
# add_prefix()
circ = Circuit().rx("theta", 0)
print(circ)
circ = add_prefix(circ, 'l0')  # after adding the prefix, the parameter "theta" becomes "l0_theta"
print(circ)
def u(qubit):
"""Qubit generator function."""
return Circuit([H.on(0), RX('a').on(qubit)])
u1 = u(0)
u1 = add_prefix(u1, 'ansatz')
print(u1)
u2 = add_prefix(u, 'ansatz')
u2 = u2(0)
print(u2)
# change_param_name()
def u(qubit):
"""Qubit generator function."""
return Circuit([H.on(0), RX('a').on(qubit)])
u1 = u(0)
u1 = change_param_name(u1, {'a': 'b'})
print(u1)
u2 = change_param_name(u, {'a': 'b'})
u2 = u2(0)
print(u2)
# UN()
circuit1 = Circuit()
circuit1 += UN(H, 4)  # apply an H gate to each qubit
print(circuit1)
circuit2 = UN(X, maps_obj=[0, 1], maps_ctrl=[2, 3])
print(circuit2)
circuit3 = UN(SWAP, maps_obj=[[0, 1], [2, 3]]).x(2, 1)
print(circuit3)
# shift()
circ = Circuit().x(1, 0)
print(circ)
circ = shift(circ, 1)
print(circ)  # the qubits the circuit acts on shift from q0, q1 to q1, q2
# Build the encoder
template = Circuit([X.on(1, 0), RZ('alpha').on(1), X.on(1, 0)])
encoder = (
UN(H, 4)
+ (RZ(f'{i}_alpha').on(i) for i in range(4))
+ sum(add_prefix(shift(template, i), f'{i + 4}') for i in range(3))
)
print(encoder)
encoder.summary()
|